1 /* 2 * Network device driver for Cell Processor-Based Blade and Celleb platform 3 * 4 * (C) Copyright IBM Corp. 2005 5 * (C) Copyright 2006 TOSHIBA CORPORATION 6 * 7 * Authors : Utz Bacher <utz.bacher@de.ibm.com> 8 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2, or (at your option) 13 * any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 23 */ 24 25 #include <linux/compiler.h> 26 #include <linux/crc32.h> 27 #include <linux/delay.h> 28 #include <linux/etherdevice.h> 29 #include <linux/ethtool.h> 30 #include <linux/firmware.h> 31 #include <linux/if_vlan.h> 32 #include <linux/in.h> 33 #include <linux/init.h> 34 #include <linux/interrupt.h> 35 #include <linux/gfp.h> 36 #include <linux/ioport.h> 37 #include <linux/ip.h> 38 #include <linux/kernel.h> 39 #include <linux/mii.h> 40 #include <linux/module.h> 41 #include <linux/netdevice.h> 42 #include <linux/device.h> 43 #include <linux/pci.h> 44 #include <linux/skbuff.h> 45 #include <linux/tcp.h> 46 #include <linux/types.h> 47 #include <linux/vmalloc.h> 48 #include <linux/wait.h> 49 #include <linux/workqueue.h> 50 #include <linux/bitops.h> 51 #include <net/checksum.h> 52 53 #include "spider_net.h" 54 55 MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \ 56 "<Jens.Osterkamp@de.ibm.com>"); 57 MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver"); 58 MODULE_LICENSE("GPL"); 59 MODULE_VERSION(VERSION); 60 MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME); 61 62 static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT; 63 static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT; 64 65 module_param(rx_descriptors, int, 0444); 66 module_param(tx_descriptors, int, 0444); 67 68 MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \ 69 "in rx chains"); 70 MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \ 71 "in tx chain"); 72 73 char spider_net_driver_name[] = "spidernet"; 74 75 static const struct pci_device_id spider_net_pci_tbl[] = { 76 { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET, 77 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 78 { 0, } 79 }; 80 81 MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl); 82 83 /** 84 * spider_net_read_reg - reads an SMMIO register of a card 85 * @card: device structure 86 * @reg: register to read from 87 * 88 * returns the content of the specified SMMIO register. 89 */ 90 static inline u32 91 spider_net_read_reg(struct spider_net_card *card, u32 reg) 92 { 93 /* We use the powerpc specific variants instead of readl_be() because 94 * we know spidernet is not a real PCI device and we can thus avoid the 95 * performance hit caused by the PCI workarounds. 96 */ 97 return in_be32(card->regs + reg); 98 } 99 100 /** 101 * spider_net_write_reg - writes to an SMMIO register of a card 102 * @card: device structure 103 * @reg: register to write to 104 * @value: value to write into the specified SMMIO register 105 */ 106 static inline void 107 spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) 108 { 109 /* We use the powerpc specific variants instead of writel_be() because 110 * we know spidernet is not a real PCI device and we can thus avoid the 111 * performance hit caused by the PCI workarounds. 112 */ 113 out_be32(card->regs + reg, value); 114 } 115 116 /** 117 * spider_net_write_phy - write to phy register 118 * @netdev: adapter to be written to 119 * @mii_id: id of MII 120 * @reg: PHY register 121 * @val: value to be written to phy register 122 * 123 * spider_net_write_phy_register writes to an arbitrary PHY 124 * register via the spider GPCWOPCMD register. We assume the queue does 125 * not run full (not more than 15 commands outstanding). 126 **/ 127 static void 128 spider_net_write_phy(struct net_device *netdev, int mii_id, 129 int reg, int val) 130 { 131 struct spider_net_card *card = netdev_priv(netdev); 132 u32 writevalue; 133 134 writevalue = ((u32)mii_id << 21) | 135 ((u32)reg << 16) | ((u32)val); 136 137 spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue); 138 } 139 140 /** 141 * spider_net_read_phy - read from phy register 142 * @netdev: network device to be read from 143 * @mii_id: id of MII 144 * @reg: PHY register 145 * 146 * Returns value read from PHY register 147 * 148 * spider_net_write_phy reads from an arbitrary PHY 149 * register via the spider GPCROPCMD register 150 **/ 151 static int 152 spider_net_read_phy(struct net_device *netdev, int mii_id, int reg) 153 { 154 struct spider_net_card *card = netdev_priv(netdev); 155 u32 readvalue; 156 157 readvalue = ((u32)mii_id << 21) | ((u32)reg << 16); 158 spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue); 159 160 /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT 161 * interrupt, as we poll for the completion of the read operation 162 * in spider_net_read_phy. Should take about 50 us */ 163 do { 164 readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD); 165 } while (readvalue & SPIDER_NET_GPREXEC); 166 167 readvalue &= SPIDER_NET_GPRDAT_MASK; 168 169 return readvalue; 170 } 171 172 /** 173 * spider_net_setup_aneg - initial auto-negotiation setup 174 * @card: device structure 175 **/ 176 static void 177 spider_net_setup_aneg(struct spider_net_card *card) 178 { 179 struct mii_phy *phy = &card->phy; 180 u32 advertise = 0; 181 u16 bmsr, estat; 182 183 bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR); 184 estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS); 185 186 if (bmsr & BMSR_10HALF) 187 advertise |= ADVERTISED_10baseT_Half; 188 if (bmsr & BMSR_10FULL) 189 advertise |= ADVERTISED_10baseT_Full; 190 if (bmsr & BMSR_100HALF) 191 advertise |= ADVERTISED_100baseT_Half; 192 if (bmsr & BMSR_100FULL) 193 advertise |= ADVERTISED_100baseT_Full; 194 195 if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL)) 196 advertise |= SUPPORTED_1000baseT_Full; 197 if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF)) 198 advertise |= SUPPORTED_1000baseT_Half; 199 200 sungem_phy_probe(phy, phy->mii_id); 201 phy->def->ops->setup_aneg(phy, advertise); 202 203 } 204 205 /** 206 * spider_net_rx_irq_off - switch off rx irq on this spider card 207 * @card: device structure 208 * 209 * switches off rx irq by masking them out in the GHIINTnMSK register 210 */ 211 static void 212 spider_net_rx_irq_off(struct spider_net_card *card) 213 { 214 u32 regvalue; 215 216 regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT); 217 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue); 218 } 219 220 /** 221 * spider_net_rx_irq_on - switch on rx irq on this spider card 222 * @card: device structure 223 * 224 * switches on rx irq by enabling them in the GHIINTnMSK register 225 */ 226 static void 227 spider_net_rx_irq_on(struct spider_net_card *card) 228 { 229 u32 regvalue; 230 231 regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT; 232 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue); 233 } 234 235 /** 236 * spider_net_set_promisc - sets the unicast address or the promiscuous mode 237 * @card: card structure 238 * 239 * spider_net_set_promisc sets the unicast destination address filter and 240 * thus either allows for non-promisc mode or promisc mode 241 */ 242 static void 243 spider_net_set_promisc(struct spider_net_card *card) 244 { 245 u32 macu, macl; 246 struct net_device *netdev = card->netdev; 247 248 if (netdev->flags & IFF_PROMISC) { 249 /* clear destination entry 0 */ 250 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0); 251 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0); 252 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 253 SPIDER_NET_PROMISC_VALUE); 254 } else { 255 macu = netdev->dev_addr[0]; 256 macu <<= 8; 257 macu |= netdev->dev_addr[1]; 258 memcpy(&macl, &netdev->dev_addr[2], sizeof(macl)); 259 260 macu |= SPIDER_NET_UA_DESCR_VALUE; 261 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu); 262 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl); 263 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 264 SPIDER_NET_NONPROMISC_VALUE); 265 } 266 } 267 268 /** 269 * spider_net_get_descr_status -- returns the status of a descriptor 270 * @descr: descriptor to look at 271 * 272 * returns the status as in the dmac_cmd_status field of the descriptor 273 */ 274 static inline int 275 spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr) 276 { 277 return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK; 278 } 279 280 /** 281 * spider_net_free_chain - free descriptor chain 282 * @card: card structure 283 * @chain: address of chain 284 * 285 */ 286 static void 287 spider_net_free_chain(struct spider_net_card *card, 288 struct spider_net_descr_chain *chain) 289 { 290 struct spider_net_descr *descr; 291 292 descr = chain->ring; 293 do { 294 descr->bus_addr = 0; 295 descr->hwdescr->next_descr_addr = 0; 296 descr = descr->next; 297 } while (descr != chain->ring); 298 299 dma_free_coherent(&card->pdev->dev, chain->num_desc, 300 chain->hwring, chain->dma_addr); 301 } 302 303 /** 304 * spider_net_init_chain - alloc and link descriptor chain 305 * @card: card structure 306 * @chain: address of chain 307 * 308 * We manage a circular list that mirrors the hardware structure, 309 * except that the hardware uses bus addresses. 310 * 311 * Returns 0 on success, <0 on failure 312 */ 313 static int 314 spider_net_init_chain(struct spider_net_card *card, 315 struct spider_net_descr_chain *chain) 316 { 317 int i; 318 struct spider_net_descr *descr; 319 struct spider_net_hw_descr *hwdescr; 320 dma_addr_t buf; 321 size_t alloc_size; 322 323 alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr); 324 325 chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size, 326 &chain->dma_addr, GFP_KERNEL); 327 if (!chain->hwring) 328 return -ENOMEM; 329 330 memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr)); 331 332 /* Set up the hardware pointers in each descriptor */ 333 descr = chain->ring; 334 hwdescr = chain->hwring; 335 buf = chain->dma_addr; 336 for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) { 337 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 338 hwdescr->next_descr_addr = 0; 339 340 descr->hwdescr = hwdescr; 341 descr->bus_addr = buf; 342 descr->next = descr + 1; 343 descr->prev = descr - 1; 344 345 buf += sizeof(struct spider_net_hw_descr); 346 } 347 /* do actual circular list */ 348 (descr-1)->next = chain->ring; 349 chain->ring->prev = descr-1; 350 351 spin_lock_init(&chain->lock); 352 chain->head = chain->ring; 353 chain->tail = chain->ring; 354 return 0; 355 } 356 357 /** 358 * spider_net_free_rx_chain_contents - frees descr contents in rx chain 359 * @card: card structure 360 * 361 * returns 0 on success, <0 on failure 362 */ 363 static void 364 spider_net_free_rx_chain_contents(struct spider_net_card *card) 365 { 366 struct spider_net_descr *descr; 367 368 descr = card->rx_chain.head; 369 do { 370 if (descr->skb) { 371 pci_unmap_single(card->pdev, descr->hwdescr->buf_addr, 372 SPIDER_NET_MAX_FRAME, 373 PCI_DMA_BIDIRECTIONAL); 374 dev_kfree_skb(descr->skb); 375 descr->skb = NULL; 376 } 377 descr = descr->next; 378 } while (descr != card->rx_chain.head); 379 } 380 381 /** 382 * spider_net_prepare_rx_descr - Reinitialize RX descriptor 383 * @card: card structure 384 * @descr: descriptor to re-init 385 * 386 * Return 0 on success, <0 on failure. 387 * 388 * Allocates a new rx skb, iommu-maps it and attaches it to the 389 * descriptor. Mark the descriptor as activated, ready-to-use. 390 */ 391 static int 392 spider_net_prepare_rx_descr(struct spider_net_card *card, 393 struct spider_net_descr *descr) 394 { 395 struct spider_net_hw_descr *hwdescr = descr->hwdescr; 396 dma_addr_t buf; 397 int offset; 398 int bufsize; 399 400 /* we need to round up the buffer size to a multiple of 128 */ 401 bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) & 402 (~(SPIDER_NET_RXBUF_ALIGN - 1)); 403 404 /* and we need to have it 128 byte aligned, therefore we allocate a 405 * bit more */ 406 /* allocate an skb */ 407 descr->skb = netdev_alloc_skb(card->netdev, 408 bufsize + SPIDER_NET_RXBUF_ALIGN - 1); 409 if (!descr->skb) { 410 if (netif_msg_rx_err(card) && net_ratelimit()) 411 dev_err(&card->netdev->dev, 412 "Not enough memory to allocate rx buffer\n"); 413 card->spider_stats.alloc_rx_skb_error++; 414 return -ENOMEM; 415 } 416 hwdescr->buf_size = bufsize; 417 hwdescr->result_size = 0; 418 hwdescr->valid_size = 0; 419 hwdescr->data_status = 0; 420 hwdescr->data_error = 0; 421 422 offset = ((unsigned long)descr->skb->data) & 423 (SPIDER_NET_RXBUF_ALIGN - 1); 424 if (offset) 425 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); 426 /* iommu-map the skb */ 427 buf = pci_map_single(card->pdev, descr->skb->data, 428 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); 429 if (pci_dma_mapping_error(card->pdev, buf)) { 430 dev_kfree_skb_any(descr->skb); 431 descr->skb = NULL; 432 if (netif_msg_rx_err(card) && net_ratelimit()) 433 dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n"); 434 card->spider_stats.rx_iommu_map_error++; 435 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 436 } else { 437 hwdescr->buf_addr = buf; 438 wmb(); 439 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED | 440 SPIDER_NET_DMAC_NOINTR_COMPLETE; 441 } 442 443 return 0; 444 } 445 446 /** 447 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses 448 * @card: card structure 449 * 450 * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the 451 * chip by writing to the appropriate register. DMA is enabled in 452 * spider_net_enable_rxdmac. 453 */ 454 static inline void 455 spider_net_enable_rxchtails(struct spider_net_card *card) 456 { 457 /* assume chain is aligned correctly */ 458 spider_net_write_reg(card, SPIDER_NET_GDADCHA , 459 card->rx_chain.tail->bus_addr); 460 } 461 462 /** 463 * spider_net_enable_rxdmac - enables a receive DMA controller 464 * @card: card structure 465 * 466 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN 467 * in the GDADMACCNTR register 468 */ 469 static inline void 470 spider_net_enable_rxdmac(struct spider_net_card *card) 471 { 472 wmb(); 473 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR, 474 SPIDER_NET_DMA_RX_VALUE); 475 } 476 477 /** 478 * spider_net_disable_rxdmac - disables the receive DMA controller 479 * @card: card structure 480 * 481 * spider_net_disable_rxdmac terminates processing on the DMA controller 482 * by turing off the DMA controller, with the force-end flag set. 483 */ 484 static inline void 485 spider_net_disable_rxdmac(struct spider_net_card *card) 486 { 487 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR, 488 SPIDER_NET_DMA_RX_FEND_VALUE); 489 } 490 491 /** 492 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains 493 * @card: card structure 494 * 495 * refills descriptors in the rx chain: allocates skbs and iommu-maps them. 496 */ 497 static void 498 spider_net_refill_rx_chain(struct spider_net_card *card) 499 { 500 struct spider_net_descr_chain *chain = &card->rx_chain; 501 unsigned long flags; 502 503 /* one context doing the refill (and a second context seeing that 504 * and omitting it) is ok. If called by NAPI, we'll be called again 505 * as spider_net_decode_one_descr is called several times. If some 506 * interrupt calls us, the NAPI is about to clean up anyway. */ 507 if (!spin_trylock_irqsave(&chain->lock, flags)) 508 return; 509 510 while (spider_net_get_descr_status(chain->head->hwdescr) == 511 SPIDER_NET_DESCR_NOT_IN_USE) { 512 if (spider_net_prepare_rx_descr(card, chain->head)) 513 break; 514 chain->head = chain->head->next; 515 } 516 517 spin_unlock_irqrestore(&chain->lock, flags); 518 } 519 520 /** 521 * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains 522 * @card: card structure 523 * 524 * Returns 0 on success, <0 on failure. 525 */ 526 static int 527 spider_net_alloc_rx_skbs(struct spider_net_card *card) 528 { 529 struct spider_net_descr_chain *chain = &card->rx_chain; 530 struct spider_net_descr *start = chain->tail; 531 struct spider_net_descr *descr = start; 532 533 /* Link up the hardware chain pointers */ 534 do { 535 descr->prev->hwdescr->next_descr_addr = descr->bus_addr; 536 descr = descr->next; 537 } while (descr != start); 538 539 /* Put at least one buffer into the chain. if this fails, 540 * we've got a problem. If not, spider_net_refill_rx_chain 541 * will do the rest at the end of this function. */ 542 if (spider_net_prepare_rx_descr(card, chain->head)) 543 goto error; 544 else 545 chain->head = chain->head->next; 546 547 /* This will allocate the rest of the rx buffers; 548 * if not, it's business as usual later on. */ 549 spider_net_refill_rx_chain(card); 550 spider_net_enable_rxdmac(card); 551 return 0; 552 553 error: 554 spider_net_free_rx_chain_contents(card); 555 return -ENOMEM; 556 } 557 558 /** 559 * spider_net_get_multicast_hash - generates hash for multicast filter table 560 * @addr: multicast address 561 * 562 * returns the hash value. 563 * 564 * spider_net_get_multicast_hash calculates a hash value for a given multicast 565 * address, that is used to set the multicast filter tables 566 */ 567 static u8 568 spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr) 569 { 570 u32 crc; 571 u8 hash; 572 char addr_for_crc[ETH_ALEN] = { 0, }; 573 int i, bit; 574 575 for (i = 0; i < ETH_ALEN * 8; i++) { 576 bit = (addr[i / 8] >> (i % 8)) & 1; 577 addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8)); 578 } 579 580 crc = crc32_be(~0, addr_for_crc, netdev->addr_len); 581 582 hash = (crc >> 27); 583 hash <<= 3; 584 hash |= crc & 7; 585 hash &= 0xff; 586 587 return hash; 588 } 589 590 /** 591 * spider_net_set_multi - sets multicast addresses and promisc flags 592 * @netdev: interface device structure 593 * 594 * spider_net_set_multi configures multicast addresses as needed for the 595 * netdev interface. It also sets up multicast, allmulti and promisc 596 * flags appropriately 597 */ 598 static void 599 spider_net_set_multi(struct net_device *netdev) 600 { 601 struct netdev_hw_addr *ha; 602 u8 hash; 603 int i; 604 u32 reg; 605 struct spider_net_card *card = netdev_priv(netdev); 606 DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) = {}; 607 608 spider_net_set_promisc(card); 609 610 if (netdev->flags & IFF_ALLMULTI) { 611 for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) { 612 set_bit(i, bitmask); 613 } 614 goto write_hash; 615 } 616 617 /* well, we know, what the broadcast hash value is: it's xfd 618 hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */ 619 set_bit(0xfd, bitmask); 620 621 netdev_for_each_mc_addr(ha, netdev) { 622 hash = spider_net_get_multicast_hash(netdev, ha->addr); 623 set_bit(hash, bitmask); 624 } 625 626 write_hash: 627 for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) { 628 reg = 0; 629 if (test_bit(i * 4, bitmask)) 630 reg += 0x08; 631 reg <<= 8; 632 if (test_bit(i * 4 + 1, bitmask)) 633 reg += 0x08; 634 reg <<= 8; 635 if (test_bit(i * 4 + 2, bitmask)) 636 reg += 0x08; 637 reg <<= 8; 638 if (test_bit(i * 4 + 3, bitmask)) 639 reg += 0x08; 640 641 spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg); 642 } 643 } 644 645 /** 646 * spider_net_prepare_tx_descr - fill tx descriptor with skb data 647 * @card: card structure 648 * @skb: packet to use 649 * 650 * returns 0 on success, <0 on failure. 651 * 652 * fills out the descriptor structure with skb data and len. Copies data, 653 * if needed (32bit DMA!) 654 */ 655 static int 656 spider_net_prepare_tx_descr(struct spider_net_card *card, 657 struct sk_buff *skb) 658 { 659 struct spider_net_descr_chain *chain = &card->tx_chain; 660 struct spider_net_descr *descr; 661 struct spider_net_hw_descr *hwdescr; 662 dma_addr_t buf; 663 unsigned long flags; 664 665 buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); 666 if (pci_dma_mapping_error(card->pdev, buf)) { 667 if (netif_msg_tx_err(card) && net_ratelimit()) 668 dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). " 669 "Dropping packet\n", skb->data, skb->len); 670 card->spider_stats.tx_iommu_map_error++; 671 return -ENOMEM; 672 } 673 674 spin_lock_irqsave(&chain->lock, flags); 675 descr = card->tx_chain.head; 676 if (descr->next == chain->tail->prev) { 677 spin_unlock_irqrestore(&chain->lock, flags); 678 pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE); 679 return -ENOMEM; 680 } 681 hwdescr = descr->hwdescr; 682 chain->head = descr->next; 683 684 descr->skb = skb; 685 hwdescr->buf_addr = buf; 686 hwdescr->buf_size = skb->len; 687 hwdescr->next_descr_addr = 0; 688 hwdescr->data_status = 0; 689 690 hwdescr->dmac_cmd_status = 691 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL; 692 spin_unlock_irqrestore(&chain->lock, flags); 693 694 if (skb->ip_summed == CHECKSUM_PARTIAL) 695 switch (ip_hdr(skb)->protocol) { 696 case IPPROTO_TCP: 697 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; 698 break; 699 case IPPROTO_UDP: 700 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP; 701 break; 702 } 703 704 /* Chain the bus address, so that the DMA engine finds this descr. */ 705 wmb(); 706 descr->prev->hwdescr->next_descr_addr = descr->bus_addr; 707 708 netif_trans_update(card->netdev); /* set netdev watchdog timer */ 709 return 0; 710 } 711 712 static int 713 spider_net_set_low_watermark(struct spider_net_card *card) 714 { 715 struct spider_net_descr *descr = card->tx_chain.tail; 716 struct spider_net_hw_descr *hwdescr; 717 unsigned long flags; 718 int status; 719 int cnt=0; 720 int i; 721 722 /* Measure the length of the queue. Measurement does not 723 * need to be precise -- does not need a lock. */ 724 while (descr != card->tx_chain.head) { 725 status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE; 726 if (status == SPIDER_NET_DESCR_NOT_IN_USE) 727 break; 728 descr = descr->next; 729 cnt++; 730 } 731 732 /* If TX queue is short, don't even bother with interrupts */ 733 if (cnt < card->tx_chain.num_desc/4) 734 return cnt; 735 736 /* Set low-watermark 3/4th's of the way into the queue. */ 737 descr = card->tx_chain.tail; 738 cnt = (cnt*3)/4; 739 for (i=0;i<cnt; i++) 740 descr = descr->next; 741 742 /* Set the new watermark, clear the old watermark */ 743 spin_lock_irqsave(&card->tx_chain.lock, flags); 744 descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG; 745 if (card->low_watermark && card->low_watermark != descr) { 746 hwdescr = card->low_watermark->hwdescr; 747 hwdescr->dmac_cmd_status = 748 hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG; 749 } 750 card->low_watermark = descr; 751 spin_unlock_irqrestore(&card->tx_chain.lock, flags); 752 return cnt; 753 } 754 755 /** 756 * spider_net_release_tx_chain - processes sent tx descriptors 757 * @card: adapter structure 758 * @brutal: if set, don't care about whether descriptor seems to be in use 759 * 760 * returns 0 if the tx ring is empty, otherwise 1. 761 * 762 * spider_net_release_tx_chain releases the tx descriptors that spider has 763 * finished with (if non-brutal) or simply release tx descriptors (if brutal). 764 * If some other context is calling this function, we return 1 so that we're 765 * scheduled again (if we were scheduled) and will not lose initiative. 766 */ 767 static int 768 spider_net_release_tx_chain(struct spider_net_card *card, int brutal) 769 { 770 struct net_device *dev = card->netdev; 771 struct spider_net_descr_chain *chain = &card->tx_chain; 772 struct spider_net_descr *descr; 773 struct spider_net_hw_descr *hwdescr; 774 struct sk_buff *skb; 775 u32 buf_addr; 776 unsigned long flags; 777 int status; 778 779 while (1) { 780 spin_lock_irqsave(&chain->lock, flags); 781 if (chain->tail == chain->head) { 782 spin_unlock_irqrestore(&chain->lock, flags); 783 return 0; 784 } 785 descr = chain->tail; 786 hwdescr = descr->hwdescr; 787 788 status = spider_net_get_descr_status(hwdescr); 789 switch (status) { 790 case SPIDER_NET_DESCR_COMPLETE: 791 dev->stats.tx_packets++; 792 dev->stats.tx_bytes += descr->skb->len; 793 break; 794 795 case SPIDER_NET_DESCR_CARDOWNED: 796 if (!brutal) { 797 spin_unlock_irqrestore(&chain->lock, flags); 798 return 1; 799 } 800 801 /* fallthrough, if we release the descriptors 802 * brutally (then we don't care about 803 * SPIDER_NET_DESCR_CARDOWNED) */ 804 805 case SPIDER_NET_DESCR_RESPONSE_ERROR: 806 case SPIDER_NET_DESCR_PROTECTION_ERROR: 807 case SPIDER_NET_DESCR_FORCE_END: 808 if (netif_msg_tx_err(card)) 809 dev_err(&card->netdev->dev, "forcing end of tx descriptor " 810 "with status x%02x\n", status); 811 dev->stats.tx_errors++; 812 break; 813 814 default: 815 dev->stats.tx_dropped++; 816 if (!brutal) { 817 spin_unlock_irqrestore(&chain->lock, flags); 818 return 1; 819 } 820 } 821 822 chain->tail = descr->next; 823 hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE; 824 skb = descr->skb; 825 descr->skb = NULL; 826 buf_addr = hwdescr->buf_addr; 827 spin_unlock_irqrestore(&chain->lock, flags); 828 829 /* unmap the skb */ 830 if (skb) { 831 pci_unmap_single(card->pdev, buf_addr, skb->len, 832 PCI_DMA_TODEVICE); 833 dev_consume_skb_any(skb); 834 } 835 } 836 return 0; 837 } 838 839 /** 840 * spider_net_kick_tx_dma - enables TX DMA processing 841 * @card: card structure 842 * 843 * This routine will start the transmit DMA running if 844 * it is not already running. This routine ned only be 845 * called when queueing a new packet to an empty tx queue. 846 * Writes the current tx chain head as start address 847 * of the tx descriptor chain and enables the transmission 848 * DMA engine. 849 */ 850 static inline void 851 spider_net_kick_tx_dma(struct spider_net_card *card) 852 { 853 struct spider_net_descr *descr; 854 855 if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) & 856 SPIDER_NET_TX_DMA_EN) 857 goto out; 858 859 descr = card->tx_chain.tail; 860 for (;;) { 861 if (spider_net_get_descr_status(descr->hwdescr) == 862 SPIDER_NET_DESCR_CARDOWNED) { 863 spider_net_write_reg(card, SPIDER_NET_GDTDCHA, 864 descr->bus_addr); 865 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 866 SPIDER_NET_DMA_TX_VALUE); 867 break; 868 } 869 if (descr == card->tx_chain.head) 870 break; 871 descr = descr->next; 872 } 873 874 out: 875 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); 876 } 877 878 /** 879 * spider_net_xmit - transmits a frame over the device 880 * @skb: packet to send out 881 * @netdev: interface device structure 882 * 883 * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure 884 */ 885 static netdev_tx_t 886 spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) 887 { 888 int cnt; 889 struct spider_net_card *card = netdev_priv(netdev); 890 891 spider_net_release_tx_chain(card, 0); 892 893 if (spider_net_prepare_tx_descr(card, skb) != 0) { 894 netdev->stats.tx_dropped++; 895 netif_stop_queue(netdev); 896 return NETDEV_TX_BUSY; 897 } 898 899 cnt = spider_net_set_low_watermark(card); 900 if (cnt < 5) 901 spider_net_kick_tx_dma(card); 902 return NETDEV_TX_OK; 903 } 904 905 /** 906 * spider_net_cleanup_tx_ring - cleans up the TX ring 907 * @card: card structure 908 * 909 * spider_net_cleanup_tx_ring is called by either the tx_timer 910 * or from the NAPI polling routine. 911 * This routine releases resources associted with transmitted 912 * packets, including updating the queue tail pointer. 913 */ 914 static void 915 spider_net_cleanup_tx_ring(struct timer_list *t) 916 { 917 struct spider_net_card *card = from_timer(card, t, tx_timer); 918 if ((spider_net_release_tx_chain(card, 0) != 0) && 919 (card->netdev->flags & IFF_UP)) { 920 spider_net_kick_tx_dma(card); 921 netif_wake_queue(card->netdev); 922 } 923 } 924 925 /** 926 * spider_net_do_ioctl - called for device ioctls 927 * @netdev: interface device structure 928 * @ifr: request parameter structure for ioctl 929 * @cmd: command code for ioctl 930 * 931 * returns 0 on success, <0 on failure. Currently, we have no special ioctls. 932 * -EOPNOTSUPP is returned, if an unknown ioctl was requested 933 */ 934 static int 935 spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 936 { 937 switch (cmd) { 938 default: 939 return -EOPNOTSUPP; 940 } 941 } 942 943 /** 944 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on 945 * @descr: descriptor to process 946 * @card: card structure 947 * 948 * Fills out skb structure and passes the data to the stack. 949 * The descriptor state is not changed. 950 */ 951 static void 952 spider_net_pass_skb_up(struct spider_net_descr *descr, 953 struct spider_net_card *card) 954 { 955 struct spider_net_hw_descr *hwdescr = descr->hwdescr; 956 struct sk_buff *skb = descr->skb; 957 struct net_device *netdev = card->netdev; 958 u32 data_status = hwdescr->data_status; 959 u32 data_error = hwdescr->data_error; 960 961 skb_put(skb, hwdescr->valid_size); 962 963 /* the card seems to add 2 bytes of junk in front 964 * of the ethernet frame */ 965 #define SPIDER_MISALIGN 2 966 skb_pull(skb, SPIDER_MISALIGN); 967 skb->protocol = eth_type_trans(skb, netdev); 968 969 /* checksum offload */ 970 skb_checksum_none_assert(skb); 971 if (netdev->features & NETIF_F_RXCSUM) { 972 if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) == 973 SPIDER_NET_DATA_STATUS_CKSUM_MASK) && 974 !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK)) 975 skb->ip_summed = CHECKSUM_UNNECESSARY; 976 } 977 978 if (data_status & SPIDER_NET_VLAN_PACKET) { 979 /* further enhancements: HW-accel VLAN */ 980 } 981 982 /* update netdevice statistics */ 983 netdev->stats.rx_packets++; 984 netdev->stats.rx_bytes += skb->len; 985 986 /* pass skb up to stack */ 987 netif_receive_skb(skb); 988 } 989 990 static void show_rx_chain(struct spider_net_card *card) 991 { 992 struct spider_net_descr_chain *chain = &card->rx_chain; 993 struct spider_net_descr *start= chain->tail; 994 struct spider_net_descr *descr= start; 995 struct spider_net_hw_descr *hwd = start->hwdescr; 996 struct device *dev = &card->netdev->dev; 997 u32 curr_desc, next_desc; 998 int status; 999 1000 int tot = 0; 1001 int cnt = 0; 1002 int off = start - chain->ring; 1003 int cstat = hwd->dmac_cmd_status; 1004 1005 dev_info(dev, "Total number of descrs=%d\n", 1006 chain->num_desc); 1007 dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n", 1008 off, cstat); 1009 1010 curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA); 1011 next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA); 1012 1013 status = cstat; 1014 do 1015 { 1016 hwd = descr->hwdescr; 1017 off = descr - chain->ring; 1018 status = hwd->dmac_cmd_status; 1019 1020 if (descr == chain->head) 1021 dev_info(dev, "Chain head is at %d, head status=0x%x\n", 1022 off, status); 1023 1024 if (curr_desc == descr->bus_addr) 1025 dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n", 1026 off, status); 1027 1028 if (next_desc == descr->bus_addr) 1029 dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n", 1030 off, status); 1031 1032 if (hwd->next_descr_addr == 0) 1033 dev_info(dev, "chain is cut at %d\n", off); 1034 1035 if (cstat != status) { 1036 int from = (chain->num_desc + off - cnt) % chain->num_desc; 1037 int to = (chain->num_desc + off - 1) % chain->num_desc; 1038 dev_info(dev, "Have %d (from %d to %d) descrs " 1039 "with stat=0x%08x\n", cnt, from, to, cstat); 1040 cstat = status; 1041 cnt = 0; 1042 } 1043 1044 cnt ++; 1045 tot ++; 1046 descr = descr->next; 1047 } while (descr != start); 1048 1049 dev_info(dev, "Last %d descrs with stat=0x%08x " 1050 "for a total of %d descrs\n", cnt, cstat, tot); 1051 1052 #ifdef DEBUG 1053 /* Now dump the whole ring */ 1054 descr = start; 1055 do 1056 { 1057 struct spider_net_hw_descr *hwd = descr->hwdescr; 1058 status = spider_net_get_descr_status(hwd); 1059 cnt = descr - chain->ring; 1060 dev_info(dev, "Descr %d stat=0x%08x skb=%p\n", 1061 cnt, status, descr->skb); 1062 dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n", 1063 descr->bus_addr, hwd->buf_addr, hwd->buf_size); 1064 dev_info(dev, "next=%08x result sz=%d valid sz=%d\n", 1065 hwd->next_descr_addr, hwd->result_size, 1066 hwd->valid_size); 1067 dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n", 1068 hwd->dmac_cmd_status, hwd->data_status, 1069 hwd->data_error); 1070 dev_info(dev, "\n"); 1071 1072 descr = descr->next; 1073 } while (descr != start); 1074 #endif 1075 1076 } 1077 1078 /** 1079 * spider_net_resync_head_ptr - Advance head ptr past empty descrs 1080 * 1081 * If the driver fails to keep up and empty the queue, then the 1082 * hardware wil run out of room to put incoming packets. This 1083 * will cause the hardware to skip descrs that are full (instead 1084 * of halting/retrying). Thus, once the driver runs, it wil need 1085 * to "catch up" to where the hardware chain pointer is at. 1086 */ 1087 static void spider_net_resync_head_ptr(struct spider_net_card *card) 1088 { 1089 unsigned long flags; 1090 struct spider_net_descr_chain *chain = &card->rx_chain; 1091 struct spider_net_descr *descr; 1092 int i, status; 1093 1094 /* Advance head pointer past any empty descrs */ 1095 descr = chain->head; 1096 status = spider_net_get_descr_status(descr->hwdescr); 1097 1098 if (status == SPIDER_NET_DESCR_NOT_IN_USE) 1099 return; 1100 1101 spin_lock_irqsave(&chain->lock, flags); 1102 1103 descr = chain->head; 1104 status = spider_net_get_descr_status(descr->hwdescr); 1105 for (i=0; i<chain->num_desc; i++) { 1106 if (status != SPIDER_NET_DESCR_CARDOWNED) break; 1107 descr = descr->next; 1108 status = spider_net_get_descr_status(descr->hwdescr); 1109 } 1110 chain->head = descr; 1111 1112 spin_unlock_irqrestore(&chain->lock, flags); 1113 } 1114 1115 static int spider_net_resync_tail_ptr(struct spider_net_card *card) 1116 { 1117 struct spider_net_descr_chain *chain = &card->rx_chain; 1118 struct spider_net_descr *descr; 1119 int i, status; 1120 1121 /* Advance tail pointer past any empty and reaped descrs */ 1122 descr = chain->tail; 1123 status = spider_net_get_descr_status(descr->hwdescr); 1124 1125 for (i=0; i<chain->num_desc; i++) { 1126 if ((status != SPIDER_NET_DESCR_CARDOWNED) && 1127 (status != SPIDER_NET_DESCR_NOT_IN_USE)) break; 1128 descr = descr->next; 1129 status = spider_net_get_descr_status(descr->hwdescr); 1130 } 1131 chain->tail = descr; 1132 1133 if ((i == chain->num_desc) || (i == 0)) 1134 return 1; 1135 return 0; 1136 } 1137 1138 /** 1139 * spider_net_decode_one_descr - processes an RX descriptor 1140 * @card: card structure 1141 * 1142 * Returns 1 if a packet has been sent to the stack, otherwise 0. 1143 * 1144 * Processes an RX descriptor by iommu-unmapping the data buffer 1145 * and passing the packet up to the stack. This function is called 1146 * in softirq context, e.g. either bottom half from interrupt or 1147 * NAPI polling context. 1148 */ 1149 static int 1150 spider_net_decode_one_descr(struct spider_net_card *card) 1151 { 1152 struct net_device *dev = card->netdev; 1153 struct spider_net_descr_chain *chain = &card->rx_chain; 1154 struct spider_net_descr *descr = chain->tail; 1155 struct spider_net_hw_descr *hwdescr = descr->hwdescr; 1156 u32 hw_buf_addr; 1157 int status; 1158 1159 status = spider_net_get_descr_status(hwdescr); 1160 1161 /* Nothing in the descriptor, or ring must be empty */ 1162 if ((status == SPIDER_NET_DESCR_CARDOWNED) || 1163 (status == SPIDER_NET_DESCR_NOT_IN_USE)) 1164 return 0; 1165 1166 /* descriptor definitively used -- move on tail */ 1167 chain->tail = descr->next; 1168 1169 /* unmap descriptor */ 1170 hw_buf_addr = hwdescr->buf_addr; 1171 hwdescr->buf_addr = 0xffffffff; 1172 pci_unmap_single(card->pdev, hw_buf_addr, 1173 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); 1174 1175 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) || 1176 (status == SPIDER_NET_DESCR_PROTECTION_ERROR) || 1177 (status == SPIDER_NET_DESCR_FORCE_END) ) { 1178 if (netif_msg_rx_err(card)) 1179 dev_err(&dev->dev, 1180 "dropping RX descriptor with state %d\n", status); 1181 dev->stats.rx_dropped++; 1182 goto bad_desc; 1183 } 1184 1185 if ( (status != SPIDER_NET_DESCR_COMPLETE) && 1186 (status != SPIDER_NET_DESCR_FRAME_END) ) { 1187 if (netif_msg_rx_err(card)) 1188 dev_err(&card->netdev->dev, 1189 "RX descriptor with unknown state %d\n", status); 1190 card->spider_stats.rx_desc_unk_state++; 1191 goto bad_desc; 1192 } 1193 1194 /* The cases we'll throw away the packet immediately */ 1195 if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) { 1196 if (netif_msg_rx_err(card)) 1197 dev_err(&card->netdev->dev, 1198 "error in received descriptor found, " 1199 "data_status=x%08x, data_error=x%08x\n", 1200 hwdescr->data_status, hwdescr->data_error); 1201 goto bad_desc; 1202 } 1203 1204 if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) { 1205 dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n", 1206 hwdescr->dmac_cmd_status); 1207 pr_err("buf_addr=x%08x\n", hw_buf_addr); 1208 pr_err("buf_size=x%08x\n", hwdescr->buf_size); 1209 pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr); 1210 pr_err("result_size=x%08x\n", hwdescr->result_size); 1211 pr_err("valid_size=x%08x\n", hwdescr->valid_size); 1212 pr_err("data_status=x%08x\n", hwdescr->data_status); 1213 pr_err("data_error=x%08x\n", hwdescr->data_error); 1214 pr_err("which=%ld\n", descr - card->rx_chain.ring); 1215 1216 card->spider_stats.rx_desc_error++; 1217 goto bad_desc; 1218 } 1219 1220 /* Ok, we've got a packet in descr */ 1221 spider_net_pass_skb_up(descr, card); 1222 descr->skb = NULL; 1223 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 1224 return 1; 1225 1226 bad_desc: 1227 if (netif_msg_rx_err(card)) 1228 show_rx_chain(card); 1229 dev_kfree_skb_irq(descr->skb); 1230 descr->skb = NULL; 1231 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 1232 return 0; 1233 } 1234 1235 /** 1236 * spider_net_poll - NAPI poll function called by the stack to return packets 1237 * @netdev: interface device structure 1238 * @budget: number of packets we can pass to the stack at most 1239 * 1240 * returns 0 if no more packets available to the driver/stack. Returns 1, 1241 * if the quota is exceeded, but the driver has still packets. 1242 * 1243 * spider_net_poll returns all packets from the rx descriptors to the stack 1244 * (using netif_receive_skb). If all/enough packets are up, the driver 1245 * reenables interrupts and returns 0. If not, 1 is returned. 1246 */ 1247 static int spider_net_poll(struct napi_struct *napi, int budget) 1248 { 1249 struct spider_net_card *card = container_of(napi, struct spider_net_card, napi); 1250 int packets_done = 0; 1251 1252 while (packets_done < budget) { 1253 if (!spider_net_decode_one_descr(card)) 1254 break; 1255 1256 packets_done++; 1257 } 1258 1259 if ((packets_done == 0) && (card->num_rx_ints != 0)) { 1260 if (!spider_net_resync_tail_ptr(card)) 1261 packets_done = budget; 1262 spider_net_resync_head_ptr(card); 1263 } 1264 card->num_rx_ints = 0; 1265 1266 spider_net_refill_rx_chain(card); 1267 spider_net_enable_rxdmac(card); 1268 1269 spider_net_cleanup_tx_ring(&card->tx_timer); 1270 1271 /* if all packets are in the stack, enable interrupts and return 0 */ 1272 /* if not, return 1 */ 1273 if (packets_done < budget) { 1274 napi_complete_done(napi, packets_done); 1275 spider_net_rx_irq_on(card); 1276 card->ignore_rx_ramfull = 0; 1277 } 1278 1279 return packets_done; 1280 } 1281 1282 /** 1283 * spider_net_set_mac - sets the MAC of an interface 1284 * @netdev: interface device structure 1285 * @ptr: pointer to new MAC address 1286 * 1287 * Returns 0 on success, <0 on failure. Currently, we don't support this 1288 * and will always return EOPNOTSUPP. 1289 */ 1290 static int 1291 spider_net_set_mac(struct net_device *netdev, void *p) 1292 { 1293 struct spider_net_card *card = netdev_priv(netdev); 1294 u32 macl, macu, regvalue; 1295 struct sockaddr *addr = p; 1296 1297 if (!is_valid_ether_addr(addr->sa_data)) 1298 return -EADDRNOTAVAIL; 1299 1300 memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN); 1301 1302 /* switch off GMACTPE and GMACRPE */ 1303 regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD); 1304 regvalue &= ~((1 << 5) | (1 << 6)); 1305 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue); 1306 1307 /* write mac */ 1308 macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) + 1309 (netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]); 1310 macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]); 1311 spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu); 1312 spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl); 1313 1314 /* switch GMACTPE and GMACRPE back on */ 1315 regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD); 1316 regvalue |= ((1 << 5) | (1 << 6)); 1317 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue); 1318 1319 spider_net_set_promisc(card); 1320 1321 return 0; 1322 } 1323 1324 /** 1325 * spider_net_link_reset 1326 * @netdev: net device structure 1327 * 1328 * This is called when the PHY_LINK signal is asserted. For the blade this is 1329 * not connected so we should never get here. 1330 * 1331 */ 1332 static void 1333 spider_net_link_reset(struct net_device *netdev) 1334 { 1335 1336 struct spider_net_card *card = netdev_priv(netdev); 1337 1338 del_timer_sync(&card->aneg_timer); 1339 1340 /* clear interrupt, block further interrupts */ 1341 spider_net_write_reg(card, SPIDER_NET_GMACST, 1342 spider_net_read_reg(card, SPIDER_NET_GMACST)); 1343 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0); 1344 1345 /* reset phy and setup aneg */ 1346 card->aneg_count = 0; 1347 card->medium = BCM54XX_COPPER; 1348 spider_net_setup_aneg(card); 1349 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); 1350 1351 } 1352 1353 /** 1354 * spider_net_handle_error_irq - handles errors raised by an interrupt 1355 * @card: card structure 1356 * @status_reg: interrupt status register 0 (GHIINT0STS) 1357 * 1358 * spider_net_handle_error_irq treats or ignores all error conditions 1359 * found when an interrupt is presented 1360 */ 1361 static void 1362 spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, 1363 u32 error_reg1, u32 error_reg2) 1364 { 1365 u32 i; 1366 int show_error = 1; 1367 1368 /* check GHIINT0STS ************************************/ 1369 if (status_reg) 1370 for (i = 0; i < 32; i++) 1371 if (status_reg & (1<<i)) 1372 switch (i) 1373 { 1374 /* let error_reg1 and error_reg2 evaluation decide, what to do 1375 case SPIDER_NET_PHYINT: 1376 case SPIDER_NET_GMAC2INT: 1377 case SPIDER_NET_GMAC1INT: 1378 case SPIDER_NET_GFIFOINT: 1379 case SPIDER_NET_DMACINT: 1380 case SPIDER_NET_GSYSINT: 1381 break; */ 1382 1383 case SPIDER_NET_GIPSINT: 1384 show_error = 0; 1385 break; 1386 1387 case SPIDER_NET_GPWOPCMPINT: 1388 /* PHY write operation completed */ 1389 show_error = 0; 1390 break; 1391 case SPIDER_NET_GPROPCMPINT: 1392 /* PHY read operation completed */ 1393 /* we don't use semaphores, as we poll for the completion 1394 * of the read operation in spider_net_read_phy. Should take 1395 * about 50 us */ 1396 show_error = 0; 1397 break; 1398 case SPIDER_NET_GPWFFINT: 1399 /* PHY command queue full */ 1400 if (netif_msg_intr(card)) 1401 dev_err(&card->netdev->dev, "PHY write queue full\n"); 1402 show_error = 0; 1403 break; 1404 1405 /* case SPIDER_NET_GRMDADRINT: not used. print a message */ 1406 /* case SPIDER_NET_GRMARPINT: not used. print a message */ 1407 /* case SPIDER_NET_GRMMPINT: not used. print a message */ 1408 1409 case SPIDER_NET_GDTDEN0INT: 1410 /* someone has set TX_DMA_EN to 0 */ 1411 show_error = 0; 1412 break; 1413 1414 case SPIDER_NET_GDDDEN0INT: /* fallthrough */ 1415 case SPIDER_NET_GDCDEN0INT: /* fallthrough */ 1416 case SPIDER_NET_GDBDEN0INT: /* fallthrough */ 1417 case SPIDER_NET_GDADEN0INT: 1418 /* someone has set RX_DMA_EN to 0 */ 1419 show_error = 0; 1420 break; 1421 1422 /* RX interrupts */ 1423 case SPIDER_NET_GDDFDCINT: 1424 case SPIDER_NET_GDCFDCINT: 1425 case SPIDER_NET_GDBFDCINT: 1426 case SPIDER_NET_GDAFDCINT: 1427 /* case SPIDER_NET_GDNMINT: not used. print a message */ 1428 /* case SPIDER_NET_GCNMINT: not used. print a message */ 1429 /* case SPIDER_NET_GBNMINT: not used. print a message */ 1430 /* case SPIDER_NET_GANMINT: not used. print a message */ 1431 /* case SPIDER_NET_GRFNMINT: not used. print a message */ 1432 show_error = 0; 1433 break; 1434 1435 /* TX interrupts */ 1436 case SPIDER_NET_GDTFDCINT: 1437 show_error = 0; 1438 break; 1439 case SPIDER_NET_GTTEDINT: 1440 show_error = 0; 1441 break; 1442 case SPIDER_NET_GDTDCEINT: 1443 /* chain end. If a descriptor should be sent, kick off 1444 * tx dma 1445 if (card->tx_chain.tail != card->tx_chain.head) 1446 spider_net_kick_tx_dma(card); 1447 */ 1448 show_error = 0; 1449 break; 1450 1451 /* case SPIDER_NET_G1TMCNTINT: not used. print a message */ 1452 /* case SPIDER_NET_GFREECNTINT: not used. print a message */ 1453 } 1454 1455 /* check GHIINT1STS ************************************/ 1456 if (error_reg1) 1457 for (i = 0; i < 32; i++) 1458 if (error_reg1 & (1<<i)) 1459 switch (i) 1460 { 1461 case SPIDER_NET_GTMFLLINT: 1462 /* TX RAM full may happen on a usual case. 1463 * Logging is not needed. */ 1464 show_error = 0; 1465 break; 1466 case SPIDER_NET_GRFDFLLINT: /* fallthrough */ 1467 case SPIDER_NET_GRFCFLLINT: /* fallthrough */ 1468 case SPIDER_NET_GRFBFLLINT: /* fallthrough */ 1469 case SPIDER_NET_GRFAFLLINT: /* fallthrough */ 1470 case SPIDER_NET_GRMFLLINT: 1471 /* Could happen when rx chain is full */ 1472 if (card->ignore_rx_ramfull == 0) { 1473 card->ignore_rx_ramfull = 1; 1474 spider_net_resync_head_ptr(card); 1475 spider_net_refill_rx_chain(card); 1476 spider_net_enable_rxdmac(card); 1477 card->num_rx_ints ++; 1478 napi_schedule(&card->napi); 1479 } 1480 show_error = 0; 1481 break; 1482 1483 /* case SPIDER_NET_GTMSHTINT: problem, print a message */ 1484 case SPIDER_NET_GDTINVDINT: 1485 /* allrighty. tx from previous descr ok */ 1486 show_error = 0; 1487 break; 1488 1489 /* chain end */ 1490 case SPIDER_NET_GDDDCEINT: /* fallthrough */ 1491 case SPIDER_NET_GDCDCEINT: /* fallthrough */ 1492 case SPIDER_NET_GDBDCEINT: /* fallthrough */ 1493 case SPIDER_NET_GDADCEINT: 1494 spider_net_resync_head_ptr(card); 1495 spider_net_refill_rx_chain(card); 1496 spider_net_enable_rxdmac(card); 1497 card->num_rx_ints ++; 1498 napi_schedule(&card->napi); 1499 show_error = 0; 1500 break; 1501 1502 /* invalid descriptor */ 1503 case SPIDER_NET_GDDINVDINT: /* fallthrough */ 1504 case SPIDER_NET_GDCINVDINT: /* fallthrough */ 1505 case SPIDER_NET_GDBINVDINT: /* fallthrough */ 1506 case SPIDER_NET_GDAINVDINT: 1507 /* Could happen when rx chain is full */ 1508 spider_net_resync_head_ptr(card); 1509 spider_net_refill_rx_chain(card); 1510 spider_net_enable_rxdmac(card); 1511 card->num_rx_ints ++; 1512 napi_schedule(&card->napi); 1513 show_error = 0; 1514 break; 1515 1516 /* case SPIDER_NET_GDTRSERINT: problem, print a message */ 1517 /* case SPIDER_NET_GDDRSERINT: problem, print a message */ 1518 /* case SPIDER_NET_GDCRSERINT: problem, print a message */ 1519 /* case SPIDER_NET_GDBRSERINT: problem, print a message */ 1520 /* case SPIDER_NET_GDARSERINT: problem, print a message */ 1521 /* case SPIDER_NET_GDSERINT: problem, print a message */ 1522 /* case SPIDER_NET_GDTPTERINT: problem, print a message */ 1523 /* case SPIDER_NET_GDDPTERINT: problem, print a message */ 1524 /* case SPIDER_NET_GDCPTERINT: problem, print a message */ 1525 /* case SPIDER_NET_GDBPTERINT: problem, print a message */ 1526 /* case SPIDER_NET_GDAPTERINT: problem, print a message */ 1527 default: 1528 show_error = 1; 1529 break; 1530 } 1531 1532 /* check GHIINT2STS ************************************/ 1533 if (error_reg2) 1534 for (i = 0; i < 32; i++) 1535 if (error_reg2 & (1<<i)) 1536 switch (i) 1537 { 1538 /* there is nothing we can (want to) do at this time. Log a 1539 * message, we can switch on and off the specific values later on 1540 case SPIDER_NET_GPROPERINT: 1541 case SPIDER_NET_GMCTCRSNGINT: 1542 case SPIDER_NET_GMCTLCOLINT: 1543 case SPIDER_NET_GMCTTMOTINT: 1544 case SPIDER_NET_GMCRCAERINT: 1545 case SPIDER_NET_GMCRCALERINT: 1546 case SPIDER_NET_GMCRALNERINT: 1547 case SPIDER_NET_GMCROVRINT: 1548 case SPIDER_NET_GMCRRNTINT: 1549 case SPIDER_NET_GMCRRXERINT: 1550 case SPIDER_NET_GTITCSERINT: 1551 case SPIDER_NET_GTIFMTERINT: 1552 case SPIDER_NET_GTIPKTRVKINT: 1553 case SPIDER_NET_GTISPINGINT: 1554 case SPIDER_NET_GTISADNGINT: 1555 case SPIDER_NET_GTISPDNGINT: 1556 case SPIDER_NET_GRIFMTERINT: 1557 case SPIDER_NET_GRIPKTRVKINT: 1558 case SPIDER_NET_GRISPINGINT: 1559 case SPIDER_NET_GRISADNGINT: 1560 case SPIDER_NET_GRISPDNGINT: 1561 break; 1562 */ 1563 default: 1564 break; 1565 } 1566 1567 if ((show_error) && (netif_msg_intr(card)) && net_ratelimit()) 1568 dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, " 1569 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n", 1570 status_reg, error_reg1, error_reg2); 1571 1572 /* clear interrupt sources */ 1573 spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1); 1574 spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2); 1575 } 1576 1577 /** 1578 * spider_net_interrupt - interrupt handler for spider_net 1579 * @irq: interrupt number 1580 * @ptr: pointer to net_device 1581 * 1582 * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no 1583 * interrupt found raised by card. 1584 * 1585 * This is the interrupt handler, that turns off 1586 * interrupts for this device and makes the stack poll the driver 1587 */ 1588 static irqreturn_t 1589 spider_net_interrupt(int irq, void *ptr) 1590 { 1591 struct net_device *netdev = ptr; 1592 struct spider_net_card *card = netdev_priv(netdev); 1593 u32 status_reg, error_reg1, error_reg2; 1594 1595 status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS); 1596 error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS); 1597 error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS); 1598 1599 if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) && 1600 !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) && 1601 !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE)) 1602 return IRQ_NONE; 1603 1604 if (status_reg & SPIDER_NET_RXINT ) { 1605 spider_net_rx_irq_off(card); 1606 napi_schedule(&card->napi); 1607 card->num_rx_ints ++; 1608 } 1609 if (status_reg & SPIDER_NET_TXINT) 1610 napi_schedule(&card->napi); 1611 1612 if (status_reg & SPIDER_NET_LINKINT) 1613 spider_net_link_reset(netdev); 1614 1615 if (status_reg & SPIDER_NET_ERRINT ) 1616 spider_net_handle_error_irq(card, status_reg, 1617 error_reg1, error_reg2); 1618 1619 /* clear interrupt sources */ 1620 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg); 1621 1622 return IRQ_HANDLED; 1623 } 1624 1625 #ifdef CONFIG_NET_POLL_CONTROLLER 1626 /** 1627 * spider_net_poll_controller - artificial interrupt for netconsole etc. 1628 * @netdev: interface device structure 1629 * 1630 * see Documentation/networking/netconsole.txt 1631 */ 1632 static void 1633 spider_net_poll_controller(struct net_device *netdev) 1634 { 1635 disable_irq(netdev->irq); 1636 spider_net_interrupt(netdev->irq, netdev); 1637 enable_irq(netdev->irq); 1638 } 1639 #endif /* CONFIG_NET_POLL_CONTROLLER */ 1640 1641 /** 1642 * spider_net_enable_interrupts - enable interrupts 1643 * @card: card structure 1644 * 1645 * spider_net_enable_interrupt enables several interrupts 1646 */ 1647 static void 1648 spider_net_enable_interrupts(struct spider_net_card *card) 1649 { 1650 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 1651 SPIDER_NET_INT0_MASK_VALUE); 1652 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 1653 SPIDER_NET_INT1_MASK_VALUE); 1654 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 1655 SPIDER_NET_INT2_MASK_VALUE); 1656 } 1657 1658 /** 1659 * spider_net_disable_interrupts - disable interrupts 1660 * @card: card structure 1661 * 1662 * spider_net_disable_interrupts disables all the interrupts 1663 */ 1664 static void 1665 spider_net_disable_interrupts(struct spider_net_card *card) 1666 { 1667 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); 1668 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0); 1669 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); 1670 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0); 1671 } 1672 1673 /** 1674 * spider_net_init_card - initializes the card 1675 * @card: card structure 1676 * 1677 * spider_net_init_card initializes the card so that other registers can 1678 * be used 1679 */ 1680 static void 1681 spider_net_init_card(struct spider_net_card *card) 1682 { 1683 spider_net_write_reg(card, SPIDER_NET_CKRCTRL, 1684 SPIDER_NET_CKRCTRL_STOP_VALUE); 1685 1686 spider_net_write_reg(card, SPIDER_NET_CKRCTRL, 1687 SPIDER_NET_CKRCTRL_RUN_VALUE); 1688 1689 /* trigger ETOMOD signal */ 1690 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, 1691 spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4); 1692 1693 spider_net_disable_interrupts(card); 1694 } 1695 1696 /** 1697 * spider_net_enable_card - enables the card by setting all kinds of regs 1698 * @card: card structure 1699 * 1700 * spider_net_enable_card sets a lot of SMMIO registers to enable the device 1701 */ 1702 static void 1703 spider_net_enable_card(struct spider_net_card *card) 1704 { 1705 int i; 1706 /* the following array consists of (register),(value) pairs 1707 * that are set in this function. A register of 0 ends the list */ 1708 u32 regs[][2] = { 1709 { SPIDER_NET_GRESUMINTNUM, 0 }, 1710 { SPIDER_NET_GREINTNUM, 0 }, 1711 1712 /* set interrupt frame number registers */ 1713 /* clear the single DMA engine registers first */ 1714 { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE }, 1715 { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE }, 1716 { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE }, 1717 { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE }, 1718 /* then set, what we really need */ 1719 { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE }, 1720 1721 /* timer counter registers and stuff */ 1722 { SPIDER_NET_GFREECNNUM, 0 }, 1723 { SPIDER_NET_GONETIMENUM, 0 }, 1724 { SPIDER_NET_GTOUTFRMNUM, 0 }, 1725 1726 /* RX mode setting */ 1727 { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE }, 1728 /* TX mode setting */ 1729 { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE }, 1730 /* IPSEC mode setting */ 1731 { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE }, 1732 1733 { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE }, 1734 1735 { SPIDER_NET_GMRWOLCTRL, 0 }, 1736 { SPIDER_NET_GTESTMD, 0x10000000 }, 1737 { SPIDER_NET_GTTQMSK, 0x00400040 }, 1738 1739 { SPIDER_NET_GMACINTEN, 0 }, 1740 1741 /* flow control stuff */ 1742 { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE }, 1743 { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE }, 1744 1745 { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE }, 1746 { 0, 0} 1747 }; 1748 1749 i = 0; 1750 while (regs[i][0]) { 1751 spider_net_write_reg(card, regs[i][0], regs[i][1]); 1752 i++; 1753 } 1754 1755 /* clear unicast filter table entries 1 to 14 */ 1756 for (i = 1; i <= 14; i++) { 1757 spider_net_write_reg(card, 1758 SPIDER_NET_GMRUAFILnR + i * 8, 1759 0x00080000); 1760 spider_net_write_reg(card, 1761 SPIDER_NET_GMRUAFILnR + i * 8 + 4, 1762 0x00000000); 1763 } 1764 1765 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000); 1766 1767 spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE); 1768 1769 /* set chain tail address for RX chains and 1770 * enable DMA */ 1771 spider_net_enable_rxchtails(card); 1772 spider_net_enable_rxdmac(card); 1773 1774 spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE); 1775 1776 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT, 1777 SPIDER_NET_LENLMT_VALUE); 1778 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, 1779 SPIDER_NET_OPMODE_VALUE); 1780 1781 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 1782 SPIDER_NET_GDTBSTA); 1783 } 1784 1785 /** 1786 * spider_net_download_firmware - loads firmware into the adapter 1787 * @card: card structure 1788 * @firmware_ptr: pointer to firmware data 1789 * 1790 * spider_net_download_firmware loads the firmware data into the 1791 * adapter. It assumes the length etc. to be allright. 1792 */ 1793 static int 1794 spider_net_download_firmware(struct spider_net_card *card, 1795 const void *firmware_ptr) 1796 { 1797 int sequencer, i; 1798 const u32 *fw_ptr = firmware_ptr; 1799 1800 /* stop sequencers */ 1801 spider_net_write_reg(card, SPIDER_NET_GSINIT, 1802 SPIDER_NET_STOP_SEQ_VALUE); 1803 1804 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS; 1805 sequencer++) { 1806 spider_net_write_reg(card, 1807 SPIDER_NET_GSnPRGADR + sequencer * 8, 0); 1808 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) { 1809 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1810 sequencer * 8, *fw_ptr); 1811 fw_ptr++; 1812 } 1813 } 1814 1815 if (spider_net_read_reg(card, SPIDER_NET_GSINIT)) 1816 return -EIO; 1817 1818 spider_net_write_reg(card, SPIDER_NET_GSINIT, 1819 SPIDER_NET_RUN_SEQ_VALUE); 1820 1821 return 0; 1822 } 1823 1824 /** 1825 * spider_net_init_firmware - reads in firmware parts 1826 * @card: card structure 1827 * 1828 * Returns 0 on success, <0 on failure 1829 * 1830 * spider_net_init_firmware opens the sequencer firmware and does some basic 1831 * checks. This function opens and releases the firmware structure. A call 1832 * to download the firmware is performed before the release. 1833 * 1834 * Firmware format 1835 * =============== 1836 * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being 1837 * the program for each sequencer. Use the command 1838 * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \ 1839 * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \ 1840 * Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin 1841 * 1842 * to generate spider_fw.bin, if you have sequencer programs with something 1843 * like the following contents for each sequencer: 1844 * <ONE LINE COMMENT> 1845 * <FIRST 4-BYTES-WORD FOR SEQUENCER> 1846 * <SECOND 4-BYTES-WORD FOR SEQUENCER> 1847 * ... 1848 * <1024th 4-BYTES-WORD FOR SEQUENCER> 1849 */ 1850 static int 1851 spider_net_init_firmware(struct spider_net_card *card) 1852 { 1853 struct firmware *firmware = NULL; 1854 struct device_node *dn; 1855 const u8 *fw_prop = NULL; 1856 int err = -ENOENT; 1857 int fw_size; 1858 1859 if (request_firmware((const struct firmware **)&firmware, 1860 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) { 1861 if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) && 1862 netif_msg_probe(card) ) { 1863 dev_err(&card->netdev->dev, 1864 "Incorrect size of spidernet firmware in " \ 1865 "filesystem. Looking in host firmware...\n"); 1866 goto try_host_fw; 1867 } 1868 err = spider_net_download_firmware(card, firmware->data); 1869 1870 release_firmware(firmware); 1871 if (err) 1872 goto try_host_fw; 1873 1874 goto done; 1875 } 1876 1877 try_host_fw: 1878 dn = pci_device_to_OF_node(card->pdev); 1879 if (!dn) 1880 goto out_err; 1881 1882 fw_prop = of_get_property(dn, "firmware", &fw_size); 1883 if (!fw_prop) 1884 goto out_err; 1885 1886 if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) && 1887 netif_msg_probe(card) ) { 1888 dev_err(&card->netdev->dev, 1889 "Incorrect size of spidernet firmware in host firmware\n"); 1890 goto done; 1891 } 1892 1893 err = spider_net_download_firmware(card, fw_prop); 1894 1895 done: 1896 return err; 1897 out_err: 1898 if (netif_msg_probe(card)) 1899 dev_err(&card->netdev->dev, 1900 "Couldn't find spidernet firmware in filesystem " \ 1901 "or host firmware\n"); 1902 return err; 1903 } 1904 1905 /** 1906 * spider_net_open - called upon ifonfig up 1907 * @netdev: interface device structure 1908 * 1909 * returns 0 on success, <0 on failure 1910 * 1911 * spider_net_open allocates all the descriptors and memory needed for 1912 * operation, sets up multicast list and enables interrupts 1913 */ 1914 int 1915 spider_net_open(struct net_device *netdev) 1916 { 1917 struct spider_net_card *card = netdev_priv(netdev); 1918 int result; 1919 1920 result = spider_net_init_firmware(card); 1921 if (result) 1922 goto init_firmware_failed; 1923 1924 /* start probing with copper */ 1925 card->aneg_count = 0; 1926 card->medium = BCM54XX_COPPER; 1927 spider_net_setup_aneg(card); 1928 if (card->phy.def->phy_id) 1929 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); 1930 1931 result = spider_net_init_chain(card, &card->tx_chain); 1932 if (result) 1933 goto alloc_tx_failed; 1934 card->low_watermark = NULL; 1935 1936 result = spider_net_init_chain(card, &card->rx_chain); 1937 if (result) 1938 goto alloc_rx_failed; 1939 1940 /* Allocate rx skbs */ 1941 result = spider_net_alloc_rx_skbs(card); 1942 if (result) 1943 goto alloc_skbs_failed; 1944 1945 spider_net_set_multi(netdev); 1946 1947 /* further enhancement: setup hw vlan, if needed */ 1948 1949 result = -EBUSY; 1950 if (request_irq(netdev->irq, spider_net_interrupt, 1951 IRQF_SHARED, netdev->name, netdev)) 1952 goto register_int_failed; 1953 1954 spider_net_enable_card(card); 1955 1956 netif_start_queue(netdev); 1957 netif_carrier_on(netdev); 1958 napi_enable(&card->napi); 1959 1960 spider_net_enable_interrupts(card); 1961 1962 return 0; 1963 1964 register_int_failed: 1965 spider_net_free_rx_chain_contents(card); 1966 alloc_skbs_failed: 1967 spider_net_free_chain(card, &card->rx_chain); 1968 alloc_rx_failed: 1969 spider_net_free_chain(card, &card->tx_chain); 1970 alloc_tx_failed: 1971 del_timer_sync(&card->aneg_timer); 1972 init_firmware_failed: 1973 return result; 1974 } 1975 1976 /** 1977 * spider_net_link_phy 1978 * @data: used for pointer to card structure 1979 * 1980 */ 1981 static void spider_net_link_phy(struct timer_list *t) 1982 { 1983 struct spider_net_card *card = from_timer(card, t, aneg_timer); 1984 struct mii_phy *phy = &card->phy; 1985 1986 /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */ 1987 if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) { 1988 1989 pr_debug("%s: link is down trying to bring it up\n", 1990 card->netdev->name); 1991 1992 switch (card->medium) { 1993 case BCM54XX_COPPER: 1994 /* enable fiber with autonegotiation first */ 1995 if (phy->def->ops->enable_fiber) 1996 phy->def->ops->enable_fiber(phy, 1); 1997 card->medium = BCM54XX_FIBER; 1998 break; 1999 2000 case BCM54XX_FIBER: 2001 /* fiber didn't come up, try to disable fiber autoneg */ 2002 if (phy->def->ops->enable_fiber) 2003 phy->def->ops->enable_fiber(phy, 0); 2004 card->medium = BCM54XX_UNKNOWN; 2005 break; 2006 2007 case BCM54XX_UNKNOWN: 2008 /* copper, fiber with and without failed, 2009 * retry from beginning */ 2010 spider_net_setup_aneg(card); 2011 card->medium = BCM54XX_COPPER; 2012 break; 2013 } 2014 2015 card->aneg_count = 0; 2016 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); 2017 return; 2018 } 2019 2020 /* link still not up, try again later */ 2021 if (!(phy->def->ops->poll_link(phy))) { 2022 card->aneg_count++; 2023 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); 2024 return; 2025 } 2026 2027 /* link came up, get abilities */ 2028 phy->def->ops->read_link(phy); 2029 2030 spider_net_write_reg(card, SPIDER_NET_GMACST, 2031 spider_net_read_reg(card, SPIDER_NET_GMACST)); 2032 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4); 2033 2034 if (phy->speed == 1000) 2035 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001); 2036 else 2037 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0); 2038 2039 card->aneg_count = 0; 2040 2041 pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n", 2042 card->netdev->name, phy->speed, 2043 phy->duplex == 1 ? "Full" : "Half", 2044 phy->autoneg == 1 ? "" : "no "); 2045 } 2046 2047 /** 2048 * spider_net_setup_phy - setup PHY 2049 * @card: card structure 2050 * 2051 * returns 0 on success, <0 on failure 2052 * 2053 * spider_net_setup_phy is used as part of spider_net_probe. 2054 **/ 2055 static int 2056 spider_net_setup_phy(struct spider_net_card *card) 2057 { 2058 struct mii_phy *phy = &card->phy; 2059 2060 spider_net_write_reg(card, SPIDER_NET_GDTDMASEL, 2061 SPIDER_NET_DMASEL_VALUE); 2062 spider_net_write_reg(card, SPIDER_NET_GPCCTRL, 2063 SPIDER_NET_PHY_CTRL_VALUE); 2064 2065 phy->dev = card->netdev; 2066 phy->mdio_read = spider_net_read_phy; 2067 phy->mdio_write = spider_net_write_phy; 2068 2069 for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) { 2070 unsigned short id; 2071 id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR); 2072 if (id != 0x0000 && id != 0xffff) { 2073 if (!sungem_phy_probe(phy, phy->mii_id)) { 2074 pr_info("Found %s.\n", phy->def->name); 2075 break; 2076 } 2077 } 2078 } 2079 2080 return 0; 2081 } 2082 2083 /** 2084 * spider_net_workaround_rxramfull - work around firmware bug 2085 * @card: card structure 2086 * 2087 * no return value 2088 **/ 2089 static void 2090 spider_net_workaround_rxramfull(struct spider_net_card *card) 2091 { 2092 int i, sequencer = 0; 2093 2094 /* cancel reset */ 2095 spider_net_write_reg(card, SPIDER_NET_CKRCTRL, 2096 SPIDER_NET_CKRCTRL_RUN_VALUE); 2097 2098 /* empty sequencer data */ 2099 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS; 2100 sequencer++) { 2101 spider_net_write_reg(card, SPIDER_NET_GSnPRGADR + 2102 sequencer * 8, 0x0); 2103 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) { 2104 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 2105 sequencer * 8, 0x0); 2106 } 2107 } 2108 2109 /* set sequencer operation */ 2110 spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe); 2111 2112 /* reset */ 2113 spider_net_write_reg(card, SPIDER_NET_CKRCTRL, 2114 SPIDER_NET_CKRCTRL_STOP_VALUE); 2115 } 2116 2117 /** 2118 * spider_net_stop - called upon ifconfig down 2119 * @netdev: interface device structure 2120 * 2121 * always returns 0 2122 */ 2123 int 2124 spider_net_stop(struct net_device *netdev) 2125 { 2126 struct spider_net_card *card = netdev_priv(netdev); 2127 2128 napi_disable(&card->napi); 2129 netif_carrier_off(netdev); 2130 netif_stop_queue(netdev); 2131 del_timer_sync(&card->tx_timer); 2132 del_timer_sync(&card->aneg_timer); 2133 2134 spider_net_disable_interrupts(card); 2135 2136 free_irq(netdev->irq, netdev); 2137 2138 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 2139 SPIDER_NET_DMA_TX_FEND_VALUE); 2140 2141 /* turn off DMA, force end */ 2142 spider_net_disable_rxdmac(card); 2143 2144 /* release chains */ 2145 spider_net_release_tx_chain(card, 1); 2146 spider_net_free_rx_chain_contents(card); 2147 2148 spider_net_free_chain(card, &card->tx_chain); 2149 spider_net_free_chain(card, &card->rx_chain); 2150 2151 return 0; 2152 } 2153 2154 /** 2155 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout 2156 * function (to be called not under interrupt status) 2157 * @data: data, is interface device structure 2158 * 2159 * called as task when tx hangs, resets interface (if interface is up) 2160 */ 2161 static void 2162 spider_net_tx_timeout_task(struct work_struct *work) 2163 { 2164 struct spider_net_card *card = 2165 container_of(work, struct spider_net_card, tx_timeout_task); 2166 struct net_device *netdev = card->netdev; 2167 2168 if (!(netdev->flags & IFF_UP)) 2169 goto out; 2170 2171 netif_device_detach(netdev); 2172 spider_net_stop(netdev); 2173 2174 spider_net_workaround_rxramfull(card); 2175 spider_net_init_card(card); 2176 2177 if (spider_net_setup_phy(card)) 2178 goto out; 2179 2180 spider_net_open(netdev); 2181 spider_net_kick_tx_dma(card); 2182 netif_device_attach(netdev); 2183 2184 out: 2185 atomic_dec(&card->tx_timeout_task_counter); 2186 } 2187 2188 /** 2189 * spider_net_tx_timeout - called when the tx timeout watchdog kicks in. 2190 * @netdev: interface device structure 2191 * 2192 * called, if tx hangs. Schedules a task that resets the interface 2193 */ 2194 static void 2195 spider_net_tx_timeout(struct net_device *netdev) 2196 { 2197 struct spider_net_card *card; 2198 2199 card = netdev_priv(netdev); 2200 atomic_inc(&card->tx_timeout_task_counter); 2201 if (netdev->flags & IFF_UP) 2202 schedule_work(&card->tx_timeout_task); 2203 else 2204 atomic_dec(&card->tx_timeout_task_counter); 2205 card->spider_stats.tx_timeouts++; 2206 } 2207 2208 static const struct net_device_ops spider_net_ops = { 2209 .ndo_open = spider_net_open, 2210 .ndo_stop = spider_net_stop, 2211 .ndo_start_xmit = spider_net_xmit, 2212 .ndo_set_rx_mode = spider_net_set_multi, 2213 .ndo_set_mac_address = spider_net_set_mac, 2214 .ndo_do_ioctl = spider_net_do_ioctl, 2215 .ndo_tx_timeout = spider_net_tx_timeout, 2216 .ndo_validate_addr = eth_validate_addr, 2217 /* HW VLAN */ 2218 #ifdef CONFIG_NET_POLL_CONTROLLER 2219 /* poll controller */ 2220 .ndo_poll_controller = spider_net_poll_controller, 2221 #endif /* CONFIG_NET_POLL_CONTROLLER */ 2222 }; 2223 2224 /** 2225 * spider_net_setup_netdev_ops - initialization of net_device operations 2226 * @netdev: net_device structure 2227 * 2228 * fills out function pointers in the net_device structure 2229 */ 2230 static void 2231 spider_net_setup_netdev_ops(struct net_device *netdev) 2232 { 2233 netdev->netdev_ops = &spider_net_ops; 2234 netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT; 2235 /* ethtool ops */ 2236 netdev->ethtool_ops = &spider_net_ethtool_ops; 2237 } 2238 2239 /** 2240 * spider_net_setup_netdev - initialization of net_device 2241 * @card: card structure 2242 * 2243 * Returns 0 on success or <0 on failure 2244 * 2245 * spider_net_setup_netdev initializes the net_device structure 2246 **/ 2247 static int 2248 spider_net_setup_netdev(struct spider_net_card *card) 2249 { 2250 int result; 2251 struct net_device *netdev = card->netdev; 2252 struct device_node *dn; 2253 struct sockaddr addr; 2254 const u8 *mac; 2255 2256 SET_NETDEV_DEV(netdev, &card->pdev->dev); 2257 2258 pci_set_drvdata(card->pdev, netdev); 2259 2260 timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0); 2261 netdev->irq = card->pdev->irq; 2262 2263 card->aneg_count = 0; 2264 timer_setup(&card->aneg_timer, spider_net_link_phy, 0); 2265 2266 netif_napi_add(netdev, &card->napi, 2267 spider_net_poll, SPIDER_NET_NAPI_WEIGHT); 2268 2269 spider_net_setup_netdev_ops(netdev); 2270 2271 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM; 2272 if (SPIDER_NET_RX_CSUM_DEFAULT) 2273 netdev->features |= NETIF_F_RXCSUM; 2274 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX; 2275 /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 2276 * NETIF_F_HW_VLAN_CTAG_FILTER */ 2277 2278 /* MTU range: 64 - 2294 */ 2279 netdev->min_mtu = SPIDER_NET_MIN_MTU; 2280 netdev->max_mtu = SPIDER_NET_MAX_MTU; 2281 2282 netdev->irq = card->pdev->irq; 2283 card->num_rx_ints = 0; 2284 card->ignore_rx_ramfull = 0; 2285 2286 dn = pci_device_to_OF_node(card->pdev); 2287 if (!dn) 2288 return -EIO; 2289 2290 mac = of_get_property(dn, "local-mac-address", NULL); 2291 if (!mac) 2292 return -EIO; 2293 memcpy(addr.sa_data, mac, ETH_ALEN); 2294 2295 result = spider_net_set_mac(netdev, &addr); 2296 if ((result) && (netif_msg_probe(card))) 2297 dev_err(&card->netdev->dev, 2298 "Failed to set MAC address: %i\n", result); 2299 2300 result = register_netdev(netdev); 2301 if (result) { 2302 if (netif_msg_probe(card)) 2303 dev_err(&card->netdev->dev, 2304 "Couldn't register net_device: %i\n", result); 2305 return result; 2306 } 2307 2308 if (netif_msg_probe(card)) 2309 pr_info("Initialized device %s.\n", netdev->name); 2310 2311 return 0; 2312 } 2313 2314 /** 2315 * spider_net_alloc_card - allocates net_device and card structure 2316 * 2317 * returns the card structure or NULL in case of errors 2318 * 2319 * the card and net_device structures are linked to each other 2320 */ 2321 static struct spider_net_card * 2322 spider_net_alloc_card(void) 2323 { 2324 struct net_device *netdev; 2325 struct spider_net_card *card; 2326 size_t alloc_size; 2327 2328 alloc_size = sizeof(struct spider_net_card) + 2329 (tx_descriptors + rx_descriptors) * sizeof(struct spider_net_descr); 2330 netdev = alloc_etherdev(alloc_size); 2331 if (!netdev) 2332 return NULL; 2333 2334 card = netdev_priv(netdev); 2335 card->netdev = netdev; 2336 card->msg_enable = SPIDER_NET_DEFAULT_MSG; 2337 INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task); 2338 init_waitqueue_head(&card->waitq); 2339 atomic_set(&card->tx_timeout_task_counter, 0); 2340 2341 card->rx_chain.num_desc = rx_descriptors; 2342 card->rx_chain.ring = card->darray; 2343 card->tx_chain.num_desc = tx_descriptors; 2344 card->tx_chain.ring = card->darray + rx_descriptors; 2345 2346 return card; 2347 } 2348 2349 /** 2350 * spider_net_undo_pci_setup - releases PCI ressources 2351 * @card: card structure 2352 * 2353 * spider_net_undo_pci_setup releases the mapped regions 2354 */ 2355 static void 2356 spider_net_undo_pci_setup(struct spider_net_card *card) 2357 { 2358 iounmap(card->regs); 2359 pci_release_regions(card->pdev); 2360 } 2361 2362 /** 2363 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations 2364 * @pdev: PCI device 2365 * 2366 * Returns the card structure or NULL if any errors occur 2367 * 2368 * spider_net_setup_pci_dev initializes pdev and together with the 2369 * functions called in spider_net_open configures the device so that 2370 * data can be transferred over it 2371 * The net_device structure is attached to the card structure, if the 2372 * function returns without error. 2373 **/ 2374 static struct spider_net_card * 2375 spider_net_setup_pci_dev(struct pci_dev *pdev) 2376 { 2377 struct spider_net_card *card; 2378 unsigned long mmio_start, mmio_len; 2379 2380 if (pci_enable_device(pdev)) { 2381 dev_err(&pdev->dev, "Couldn't enable PCI device\n"); 2382 return NULL; 2383 } 2384 2385 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 2386 dev_err(&pdev->dev, 2387 "Couldn't find proper PCI device base address.\n"); 2388 goto out_disable_dev; 2389 } 2390 2391 if (pci_request_regions(pdev, spider_net_driver_name)) { 2392 dev_err(&pdev->dev, 2393 "Couldn't obtain PCI resources, aborting.\n"); 2394 goto out_disable_dev; 2395 } 2396 2397 pci_set_master(pdev); 2398 2399 card = spider_net_alloc_card(); 2400 if (!card) { 2401 dev_err(&pdev->dev, 2402 "Couldn't allocate net_device structure, aborting.\n"); 2403 goto out_release_regions; 2404 } 2405 card->pdev = pdev; 2406 2407 /* fetch base address and length of first resource */ 2408 mmio_start = pci_resource_start(pdev, 0); 2409 mmio_len = pci_resource_len(pdev, 0); 2410 2411 card->netdev->mem_start = mmio_start; 2412 card->netdev->mem_end = mmio_start + mmio_len; 2413 card->regs = ioremap(mmio_start, mmio_len); 2414 2415 if (!card->regs) { 2416 dev_err(&pdev->dev, 2417 "Couldn't obtain PCI resources, aborting.\n"); 2418 goto out_release_regions; 2419 } 2420 2421 return card; 2422 2423 out_release_regions: 2424 pci_release_regions(pdev); 2425 out_disable_dev: 2426 pci_disable_device(pdev); 2427 return NULL; 2428 } 2429 2430 /** 2431 * spider_net_probe - initialization of a device 2432 * @pdev: PCI device 2433 * @ent: entry in the device id list 2434 * 2435 * Returns 0 on success, <0 on failure 2436 * 2437 * spider_net_probe initializes pdev and registers a net_device 2438 * structure for it. After that, the device can be ifconfig'ed up 2439 **/ 2440 static int 2441 spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2442 { 2443 int err = -EIO; 2444 struct spider_net_card *card; 2445 2446 card = spider_net_setup_pci_dev(pdev); 2447 if (!card) 2448 goto out; 2449 2450 spider_net_workaround_rxramfull(card); 2451 spider_net_init_card(card); 2452 2453 err = spider_net_setup_phy(card); 2454 if (err) 2455 goto out_undo_pci; 2456 2457 err = spider_net_setup_netdev(card); 2458 if (err) 2459 goto out_undo_pci; 2460 2461 return 0; 2462 2463 out_undo_pci: 2464 spider_net_undo_pci_setup(card); 2465 free_netdev(card->netdev); 2466 out: 2467 return err; 2468 } 2469 2470 /** 2471 * spider_net_remove - removal of a device 2472 * @pdev: PCI device 2473 * 2474 * Returns 0 on success, <0 on failure 2475 * 2476 * spider_net_remove is called to remove the device and unregisters the 2477 * net_device 2478 **/ 2479 static void 2480 spider_net_remove(struct pci_dev *pdev) 2481 { 2482 struct net_device *netdev; 2483 struct spider_net_card *card; 2484 2485 netdev = pci_get_drvdata(pdev); 2486 card = netdev_priv(netdev); 2487 2488 wait_event(card->waitq, 2489 atomic_read(&card->tx_timeout_task_counter) == 0); 2490 2491 unregister_netdev(netdev); 2492 2493 /* switch off card */ 2494 spider_net_write_reg(card, SPIDER_NET_CKRCTRL, 2495 SPIDER_NET_CKRCTRL_STOP_VALUE); 2496 spider_net_write_reg(card, SPIDER_NET_CKRCTRL, 2497 SPIDER_NET_CKRCTRL_RUN_VALUE); 2498 2499 spider_net_undo_pci_setup(card); 2500 free_netdev(netdev); 2501 } 2502 2503 static struct pci_driver spider_net_driver = { 2504 .name = spider_net_driver_name, 2505 .id_table = spider_net_pci_tbl, 2506 .probe = spider_net_probe, 2507 .remove = spider_net_remove 2508 }; 2509 2510 /** 2511 * spider_net_init - init function when the driver is loaded 2512 * 2513 * spider_net_init registers the device driver 2514 */ 2515 static int __init spider_net_init(void) 2516 { 2517 printk(KERN_INFO "Spidernet version %s.\n", VERSION); 2518 2519 if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) { 2520 rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN; 2521 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors); 2522 } 2523 if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) { 2524 rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX; 2525 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors); 2526 } 2527 if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) { 2528 tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN; 2529 pr_info("adjusting tx descriptors to %i.\n", tx_descriptors); 2530 } 2531 if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) { 2532 tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX; 2533 pr_info("adjusting tx descriptors to %i.\n", tx_descriptors); 2534 } 2535 2536 return pci_register_driver(&spider_net_driver); 2537 } 2538 2539 /** 2540 * spider_net_cleanup - exit function when driver is unloaded 2541 * 2542 * spider_net_cleanup unregisters the device driver 2543 */ 2544 static void __exit spider_net_cleanup(void) 2545 { 2546 pci_unregister_driver(&spider_net_driver); 2547 } 2548 2549 module_init(spider_net_init); 2550 module_exit(spider_net_cleanup); 2551