1 /* 2 * Network device driver for Cell Processor-Based Blade and Celleb platform 3 * 4 * (C) Copyright IBM Corp. 2005 5 * (C) Copyright 2006 TOSHIBA CORPORATION 6 * 7 * Authors : Utz Bacher <utz.bacher@de.ibm.com> 8 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2, or (at your option) 13 * any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 23 */ 24 25 #include <linux/compiler.h> 26 #include <linux/crc32.h> 27 #include <linux/delay.h> 28 #include <linux/etherdevice.h> 29 #include <linux/ethtool.h> 30 #include <linux/firmware.h> 31 #include <linux/if_vlan.h> 32 #include <linux/in.h> 33 #include <linux/init.h> 34 #include <linux/interrupt.h> 35 #include <linux/gfp.h> 36 #include <linux/ioport.h> 37 #include <linux/ip.h> 38 #include <linux/kernel.h> 39 #include <linux/mii.h> 40 #include <linux/module.h> 41 #include <linux/netdevice.h> 42 #include <linux/device.h> 43 #include <linux/pci.h> 44 #include <linux/skbuff.h> 45 #include <linux/tcp.h> 46 #include <linux/types.h> 47 #include <linux/vmalloc.h> 48 #include <linux/wait.h> 49 #include <linux/workqueue.h> 50 #include <linux/bitops.h> 51 #include <net/checksum.h> 52 53 #include "spider_net.h" 54 55 MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \ 56 "<Jens.Osterkamp@de.ibm.com>"); 57 MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver"); 58 MODULE_LICENSE("GPL"); 59 MODULE_VERSION(VERSION); 60 MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME); 61 62 static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT; 63 static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT; 64 65 module_param(rx_descriptors, int, 0444); 66 module_param(tx_descriptors, int, 0444); 67 68 MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \ 69 "in rx chains"); 70 MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \ 71 "in tx chain"); 72 73 char spider_net_driver_name[] = "spidernet"; 74 75 static const struct pci_device_id spider_net_pci_tbl[] = { 76 { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET, 77 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 78 { 0, } 79 }; 80 81 MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl); 82 83 /** 84 * spider_net_read_reg - reads an SMMIO register of a card 85 * @card: device structure 86 * @reg: register to read from 87 * 88 * returns the content of the specified SMMIO register. 89 */ 90 static inline u32 91 spider_net_read_reg(struct spider_net_card *card, u32 reg) 92 { 93 /* We use the powerpc specific variants instead of readl_be() because 94 * we know spidernet is not a real PCI device and we can thus avoid the 95 * performance hit caused by the PCI workarounds. 
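	 *
	 * card->regs is the ioremapped SMMIO register window of the Spider
	 * chip, so a register update is a plain big-endian read-modify-write
	 * through these helpers, e.g. (illustrative sketch only):
	 *
	 *	u32 v = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
	 *	spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, v | 0x4);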
	 */
	return in_be32(card->regs + reg);
}

/**
 * spider_net_write_reg - writes to an SMMIO register of a card
 * @card: device structure
 * @reg: register to write to
 * @value: value to write into the specified SMMIO register
 */
static inline void
spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
{
	/* We use the powerpc specific variants instead of writel_be() because
	 * we know spidernet is not a real PCI device and we can thus avoid the
	 * performance hit caused by the PCI workarounds.
	 */
	out_be32(card->regs + reg, value);
}

/**
 * spider_net_write_phy - write to phy register
 * @netdev: adapter to be written to
 * @mii_id: id of MII
 * @reg: PHY register
 * @val: value to be written to phy register
 *
 * spider_net_write_phy writes to an arbitrary PHY
 * register via the spider GPCWOPCMD register. We assume the queue does
 * not run full (not more than 15 commands outstanding).
 **/
static void
spider_net_write_phy(struct net_device *netdev, int mii_id,
		     int reg, int val)
{
	struct spider_net_card *card = netdev_priv(netdev);
	u32 writevalue;

	writevalue = ((u32)mii_id << 21) |
		((u32)reg << 16) | ((u32)val);

	spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
}

/**
 * spider_net_read_phy - read from phy register
 * @netdev: network device to be read from
 * @mii_id: id of MII
 * @reg: PHY register
 *
 * Returns value read from PHY register
 *
 * spider_net_read_phy reads from an arbitrary PHY
 * register via the spider GPCROPCMD register
 **/
static int
spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
{
	struct spider_net_card *card = netdev_priv(netdev);
	u32 readvalue;

	readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
	spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);

	/* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
	 * interrupt, as we poll for the completion of the read operation
	 * in spider_net_read_phy.
Should take about 50 us */ 163 do { 164 readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD); 165 } while (readvalue & SPIDER_NET_GPREXEC); 166 167 readvalue &= SPIDER_NET_GPRDAT_MASK; 168 169 return readvalue; 170 } 171 172 /** 173 * spider_net_setup_aneg - initial auto-negotiation setup 174 * @card: device structure 175 **/ 176 static void 177 spider_net_setup_aneg(struct spider_net_card *card) 178 { 179 struct mii_phy *phy = &card->phy; 180 u32 advertise = 0; 181 u16 bmsr, estat; 182 183 bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR); 184 estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS); 185 186 if (bmsr & BMSR_10HALF) 187 advertise |= ADVERTISED_10baseT_Half; 188 if (bmsr & BMSR_10FULL) 189 advertise |= ADVERTISED_10baseT_Full; 190 if (bmsr & BMSR_100HALF) 191 advertise |= ADVERTISED_100baseT_Half; 192 if (bmsr & BMSR_100FULL) 193 advertise |= ADVERTISED_100baseT_Full; 194 195 if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL)) 196 advertise |= SUPPORTED_1000baseT_Full; 197 if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF)) 198 advertise |= SUPPORTED_1000baseT_Half; 199 200 sungem_phy_probe(phy, phy->mii_id); 201 phy->def->ops->setup_aneg(phy, advertise); 202 203 } 204 205 /** 206 * spider_net_rx_irq_off - switch off rx irq on this spider card 207 * @card: device structure 208 * 209 * switches off rx irq by masking them out in the GHIINTnMSK register 210 */ 211 static void 212 spider_net_rx_irq_off(struct spider_net_card *card) 213 { 214 u32 regvalue; 215 216 regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT); 217 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue); 218 } 219 220 /** 221 * spider_net_rx_irq_on - switch on rx irq on this spider card 222 * @card: device structure 223 * 224 * switches on rx irq by enabling them in the GHIINTnMSK register 225 */ 226 static void 227 spider_net_rx_irq_on(struct spider_net_card *card) 228 { 229 u32 regvalue; 230 231 regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT; 232 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue); 233 } 234 235 /** 236 * spider_net_set_promisc - sets the unicast address or the promiscuous mode 237 * @card: card structure 238 * 239 * spider_net_set_promisc sets the unicast destination address filter and 240 * thus either allows for non-promisc mode or promisc mode 241 */ 242 static void 243 spider_net_set_promisc(struct spider_net_card *card) 244 { 245 u32 macu, macl; 246 struct net_device *netdev = card->netdev; 247 248 if (netdev->flags & IFF_PROMISC) { 249 /* clear destination entry 0 */ 250 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0); 251 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0); 252 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 253 SPIDER_NET_PROMISC_VALUE); 254 } else { 255 macu = netdev->dev_addr[0]; 256 macu <<= 8; 257 macu |= netdev->dev_addr[1]; 258 memcpy(&macl, &netdev->dev_addr[2], sizeof(macl)); 259 260 macu |= SPIDER_NET_UA_DESCR_VALUE; 261 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu); 262 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl); 263 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 264 SPIDER_NET_NONPROMISC_VALUE); 265 } 266 } 267 268 /** 269 * spider_net_get_descr_status -- returns the status of a descriptor 270 * @descr: descriptor to look at 271 * 272 * returns the status as in the dmac_cmd_status field of the descriptor 273 */ 274 static inline int 275 spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr) 276 { 277 return 
hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK; 278 } 279 280 /** 281 * spider_net_free_chain - free descriptor chain 282 * @card: card structure 283 * @chain: address of chain 284 * 285 */ 286 static void 287 spider_net_free_chain(struct spider_net_card *card, 288 struct spider_net_descr_chain *chain) 289 { 290 struct spider_net_descr *descr; 291 292 descr = chain->ring; 293 do { 294 descr->bus_addr = 0; 295 descr->hwdescr->next_descr_addr = 0; 296 descr = descr->next; 297 } while (descr != chain->ring); 298 299 dma_free_coherent(&card->pdev->dev, chain->num_desc, 300 chain->hwring, chain->dma_addr); 301 } 302 303 /** 304 * spider_net_init_chain - alloc and link descriptor chain 305 * @card: card structure 306 * @chain: address of chain 307 * 308 * We manage a circular list that mirrors the hardware structure, 309 * except that the hardware uses bus addresses. 310 * 311 * Returns 0 on success, <0 on failure 312 */ 313 static int 314 spider_net_init_chain(struct spider_net_card *card, 315 struct spider_net_descr_chain *chain) 316 { 317 int i; 318 struct spider_net_descr *descr; 319 struct spider_net_hw_descr *hwdescr; 320 dma_addr_t buf; 321 size_t alloc_size; 322 323 alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr); 324 325 chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size, 326 &chain->dma_addr, GFP_KERNEL); 327 if (!chain->hwring) 328 return -ENOMEM; 329 330 memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr)); 331 332 /* Set up the hardware pointers in each descriptor */ 333 descr = chain->ring; 334 hwdescr = chain->hwring; 335 buf = chain->dma_addr; 336 for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) { 337 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 338 hwdescr->next_descr_addr = 0; 339 340 descr->hwdescr = hwdescr; 341 descr->bus_addr = buf; 342 descr->next = descr + 1; 343 descr->prev = descr - 1; 344 345 buf += sizeof(struct spider_net_hw_descr); 346 } 347 /* do actual circular list */ 348 (descr-1)->next = chain->ring; 349 chain->ring->prev = descr-1; 350 351 spin_lock_init(&chain->lock); 352 chain->head = chain->ring; 353 chain->tail = chain->ring; 354 return 0; 355 } 356 357 /** 358 * spider_net_free_rx_chain_contents - frees descr contents in rx chain 359 * @card: card structure 360 * 361 * returns 0 on success, <0 on failure 362 */ 363 static void 364 spider_net_free_rx_chain_contents(struct spider_net_card *card) 365 { 366 struct spider_net_descr *descr; 367 368 descr = card->rx_chain.head; 369 do { 370 if (descr->skb) { 371 pci_unmap_single(card->pdev, descr->hwdescr->buf_addr, 372 SPIDER_NET_MAX_FRAME, 373 PCI_DMA_BIDIRECTIONAL); 374 dev_kfree_skb(descr->skb); 375 descr->skb = NULL; 376 } 377 descr = descr->next; 378 } while (descr != card->rx_chain.head); 379 } 380 381 /** 382 * spider_net_prepare_rx_descr - Reinitialize RX descriptor 383 * @card: card structure 384 * @descr: descriptor to re-init 385 * 386 * Return 0 on success, <0 on failure. 387 * 388 * Allocates a new rx skb, iommu-maps it and attaches it to the 389 * descriptor. Mark the descriptor as activated, ready-to-use. 
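 *
 * The data buffer must start on a SPIDER_NET_RXBUF_ALIGN boundary and its
 * size is rounded up to a multiple of that alignment before mapping, using
 * the usual power-of-two idiom shown below; the skb data pointer is then
 * shifted into place with skb_reserve(). As a rough worked example,
 * assuming a 128-byte alignment requirement, a 2312-byte maximum frame
 * rounds up to 2432 bytes.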
390 */ 391 static int 392 spider_net_prepare_rx_descr(struct spider_net_card *card, 393 struct spider_net_descr *descr) 394 { 395 struct spider_net_hw_descr *hwdescr = descr->hwdescr; 396 dma_addr_t buf; 397 int offset; 398 int bufsize; 399 400 /* we need to round up the buffer size to a multiple of 128 */ 401 bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) & 402 (~(SPIDER_NET_RXBUF_ALIGN - 1)); 403 404 /* and we need to have it 128 byte aligned, therefore we allocate a 405 * bit more */ 406 /* allocate an skb */ 407 descr->skb = netdev_alloc_skb(card->netdev, 408 bufsize + SPIDER_NET_RXBUF_ALIGN - 1); 409 if (!descr->skb) { 410 if (netif_msg_rx_err(card) && net_ratelimit()) 411 dev_err(&card->netdev->dev, 412 "Not enough memory to allocate rx buffer\n"); 413 card->spider_stats.alloc_rx_skb_error++; 414 return -ENOMEM; 415 } 416 hwdescr->buf_size = bufsize; 417 hwdescr->result_size = 0; 418 hwdescr->valid_size = 0; 419 hwdescr->data_status = 0; 420 hwdescr->data_error = 0; 421 422 offset = ((unsigned long)descr->skb->data) & 423 (SPIDER_NET_RXBUF_ALIGN - 1); 424 if (offset) 425 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); 426 /* iommu-map the skb */ 427 buf = pci_map_single(card->pdev, descr->skb->data, 428 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); 429 if (pci_dma_mapping_error(card->pdev, buf)) { 430 dev_kfree_skb_any(descr->skb); 431 descr->skb = NULL; 432 if (netif_msg_rx_err(card) && net_ratelimit()) 433 dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n"); 434 card->spider_stats.rx_iommu_map_error++; 435 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 436 } else { 437 hwdescr->buf_addr = buf; 438 wmb(); 439 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED | 440 SPIDER_NET_DMAC_NOINTR_COMPLETE; 441 } 442 443 return 0; 444 } 445 446 /** 447 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses 448 * @card: card structure 449 * 450 * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the 451 * chip by writing to the appropriate register. DMA is enabled in 452 * spider_net_enable_rxdmac. 453 */ 454 static inline void 455 spider_net_enable_rxchtails(struct spider_net_card *card) 456 { 457 /* assume chain is aligned correctly */ 458 spider_net_write_reg(card, SPIDER_NET_GDADCHA , 459 card->rx_chain.tail->bus_addr); 460 } 461 462 /** 463 * spider_net_enable_rxdmac - enables a receive DMA controller 464 * @card: card structure 465 * 466 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN 467 * in the GDADMACCNTR register 468 */ 469 static inline void 470 spider_net_enable_rxdmac(struct spider_net_card *card) 471 { 472 wmb(); 473 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR, 474 SPIDER_NET_DMA_RX_VALUE); 475 } 476 477 /** 478 * spider_net_disable_rxdmac - disables the receive DMA controller 479 * @card: card structure 480 * 481 * spider_net_disable_rxdmac terminates processing on the DMA controller 482 * by turing off the DMA controller, with the force-end flag set. 483 */ 484 static inline void 485 spider_net_disable_rxdmac(struct spider_net_card *card) 486 { 487 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR, 488 SPIDER_NET_DMA_RX_FEND_VALUE); 489 } 490 491 /** 492 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains 493 * @card: card structure 494 * 495 * refills descriptors in the rx chain: allocates skbs and iommu-maps them. 
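 * Only one context refills at a time: the chain lock is taken with a
 * trylock, so a concurrent refill attempt simply returns. The walk starts
 * at chain->head and stops at the first descriptor that is no longer
 * SPIDER_NET_DESCR_NOT_IN_USE, or at the first skb allocation failure.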
496 */ 497 static void 498 spider_net_refill_rx_chain(struct spider_net_card *card) 499 { 500 struct spider_net_descr_chain *chain = &card->rx_chain; 501 unsigned long flags; 502 503 /* one context doing the refill (and a second context seeing that 504 * and omitting it) is ok. If called by NAPI, we'll be called again 505 * as spider_net_decode_one_descr is called several times. If some 506 * interrupt calls us, the NAPI is about to clean up anyway. */ 507 if (!spin_trylock_irqsave(&chain->lock, flags)) 508 return; 509 510 while (spider_net_get_descr_status(chain->head->hwdescr) == 511 SPIDER_NET_DESCR_NOT_IN_USE) { 512 if (spider_net_prepare_rx_descr(card, chain->head)) 513 break; 514 chain->head = chain->head->next; 515 } 516 517 spin_unlock_irqrestore(&chain->lock, flags); 518 } 519 520 /** 521 * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains 522 * @card: card structure 523 * 524 * Returns 0 on success, <0 on failure. 525 */ 526 static int 527 spider_net_alloc_rx_skbs(struct spider_net_card *card) 528 { 529 struct spider_net_descr_chain *chain = &card->rx_chain; 530 struct spider_net_descr *start = chain->tail; 531 struct spider_net_descr *descr = start; 532 533 /* Link up the hardware chain pointers */ 534 do { 535 descr->prev->hwdescr->next_descr_addr = descr->bus_addr; 536 descr = descr->next; 537 } while (descr != start); 538 539 /* Put at least one buffer into the chain. if this fails, 540 * we've got a problem. If not, spider_net_refill_rx_chain 541 * will do the rest at the end of this function. */ 542 if (spider_net_prepare_rx_descr(card, chain->head)) 543 goto error; 544 else 545 chain->head = chain->head->next; 546 547 /* This will allocate the rest of the rx buffers; 548 * if not, it's business as usual later on. */ 549 spider_net_refill_rx_chain(card); 550 spider_net_enable_rxdmac(card); 551 return 0; 552 553 error: 554 spider_net_free_rx_chain_contents(card); 555 return -ENOMEM; 556 } 557 558 /** 559 * spider_net_get_multicast_hash - generates hash for multicast filter table 560 * @addr: multicast address 561 * 562 * returns the hash value. 563 * 564 * spider_net_get_multicast_hash calculates a hash value for a given multicast 565 * address, that is used to set the multicast filter tables 566 */ 567 static u8 568 spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr) 569 { 570 u32 crc; 571 u8 hash; 572 char addr_for_crc[ETH_ALEN] = { 0, }; 573 int i, bit; 574 575 for (i = 0; i < ETH_ALEN * 8; i++) { 576 bit = (addr[i / 8] >> (i % 8)) & 1; 577 addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8)); 578 } 579 580 crc = crc32_be(~0, addr_for_crc, netdev->addr_len); 581 582 hash = (crc >> 27); 583 hash <<= 3; 584 hash |= crc & 7; 585 hash &= 0xff; 586 587 return hash; 588 } 589 590 /** 591 * spider_net_set_multi - sets multicast addresses and promisc flags 592 * @netdev: interface device structure 593 * 594 * spider_net_set_multi configures multicast addresses as needed for the 595 * netdev interface. 
It also sets up multicast, allmulti and promisc 596 * flags appropriately 597 */ 598 static void 599 spider_net_set_multi(struct net_device *netdev) 600 { 601 struct netdev_hw_addr *ha; 602 u8 hash; 603 int i; 604 u32 reg; 605 struct spider_net_card *card = netdev_priv(netdev); 606 DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) = {}; 607 608 spider_net_set_promisc(card); 609 610 if (netdev->flags & IFF_ALLMULTI) { 611 for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) { 612 set_bit(i, bitmask); 613 } 614 goto write_hash; 615 } 616 617 /* well, we know, what the broadcast hash value is: it's xfd 618 hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */ 619 set_bit(0xfd, bitmask); 620 621 netdev_for_each_mc_addr(ha, netdev) { 622 hash = spider_net_get_multicast_hash(netdev, ha->addr); 623 set_bit(hash, bitmask); 624 } 625 626 write_hash: 627 for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) { 628 reg = 0; 629 if (test_bit(i * 4, bitmask)) 630 reg += 0x08; 631 reg <<= 8; 632 if (test_bit(i * 4 + 1, bitmask)) 633 reg += 0x08; 634 reg <<= 8; 635 if (test_bit(i * 4 + 2, bitmask)) 636 reg += 0x08; 637 reg <<= 8; 638 if (test_bit(i * 4 + 3, bitmask)) 639 reg += 0x08; 640 641 spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg); 642 } 643 } 644 645 /** 646 * spider_net_prepare_tx_descr - fill tx descriptor with skb data 647 * @card: card structure 648 * @skb: packet to use 649 * 650 * returns 0 on success, <0 on failure. 651 * 652 * fills out the descriptor structure with skb data and len. Copies data, 653 * if needed (32bit DMA!) 654 */ 655 static int 656 spider_net_prepare_tx_descr(struct spider_net_card *card, 657 struct sk_buff *skb) 658 { 659 struct spider_net_descr_chain *chain = &card->tx_chain; 660 struct spider_net_descr *descr; 661 struct spider_net_hw_descr *hwdescr; 662 dma_addr_t buf; 663 unsigned long flags; 664 665 buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); 666 if (pci_dma_mapping_error(card->pdev, buf)) { 667 if (netif_msg_tx_err(card) && net_ratelimit()) 668 dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). " 669 "Dropping packet\n", skb->data, skb->len); 670 card->spider_stats.tx_iommu_map_error++; 671 return -ENOMEM; 672 } 673 674 spin_lock_irqsave(&chain->lock, flags); 675 descr = card->tx_chain.head; 676 if (descr->next == chain->tail->prev) { 677 spin_unlock_irqrestore(&chain->lock, flags); 678 pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE); 679 return -ENOMEM; 680 } 681 hwdescr = descr->hwdescr; 682 chain->head = descr->next; 683 684 descr->skb = skb; 685 hwdescr->buf_addr = buf; 686 hwdescr->buf_size = skb->len; 687 hwdescr->next_descr_addr = 0; 688 hwdescr->data_status = 0; 689 690 hwdescr->dmac_cmd_status = 691 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL; 692 spin_unlock_irqrestore(&chain->lock, flags); 693 694 if (skb->ip_summed == CHECKSUM_PARTIAL) 695 switch (ip_hdr(skb)->protocol) { 696 case IPPROTO_TCP: 697 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; 698 break; 699 case IPPROTO_UDP: 700 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP; 701 break; 702 } 703 704 /* Chain the bus address, so that the DMA engine finds this descr. 
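	 * The descriptor fields written above must be globally visible
	 * before the link in the previous descriptor publishes this one to
	 * the DMA engine, hence the write barrier that follows.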
*/ 705 wmb(); 706 descr->prev->hwdescr->next_descr_addr = descr->bus_addr; 707 708 netif_trans_update(card->netdev); /* set netdev watchdog timer */ 709 return 0; 710 } 711 712 static int 713 spider_net_set_low_watermark(struct spider_net_card *card) 714 { 715 struct spider_net_descr *descr = card->tx_chain.tail; 716 struct spider_net_hw_descr *hwdescr; 717 unsigned long flags; 718 int status; 719 int cnt=0; 720 int i; 721 722 /* Measure the length of the queue. Measurement does not 723 * need to be precise -- does not need a lock. */ 724 while (descr != card->tx_chain.head) { 725 status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE; 726 if (status == SPIDER_NET_DESCR_NOT_IN_USE) 727 break; 728 descr = descr->next; 729 cnt++; 730 } 731 732 /* If TX queue is short, don't even bother with interrupts */ 733 if (cnt < card->tx_chain.num_desc/4) 734 return cnt; 735 736 /* Set low-watermark 3/4th's of the way into the queue. */ 737 descr = card->tx_chain.tail; 738 cnt = (cnt*3)/4; 739 for (i=0;i<cnt; i++) 740 descr = descr->next; 741 742 /* Set the new watermark, clear the old watermark */ 743 spin_lock_irqsave(&card->tx_chain.lock, flags); 744 descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG; 745 if (card->low_watermark && card->low_watermark != descr) { 746 hwdescr = card->low_watermark->hwdescr; 747 hwdescr->dmac_cmd_status = 748 hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG; 749 } 750 card->low_watermark = descr; 751 spin_unlock_irqrestore(&card->tx_chain.lock, flags); 752 return cnt; 753 } 754 755 /** 756 * spider_net_release_tx_chain - processes sent tx descriptors 757 * @card: adapter structure 758 * @brutal: if set, don't care about whether descriptor seems to be in use 759 * 760 * returns 0 if the tx ring is empty, otherwise 1. 761 * 762 * spider_net_release_tx_chain releases the tx descriptors that spider has 763 * finished with (if non-brutal) or simply release tx descriptors (if brutal). 764 * If some other context is calling this function, we return 1 so that we're 765 * scheduled again (if we were scheduled) and will not lose initiative. 
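 *
 * For each descriptor the status decides the action: COMPLETE updates
 * the tx packet and byte counters, CARDOWNED ends the walk (unless
 * brutal is set), and the error states are counted as tx_errors. Every
 * descriptor that is actually reaped is marked not-in-use again and its
 * skb, if any, is unmapped and freed.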
 */
static int
spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
{
	struct net_device *dev = card->netdev;
	struct spider_net_descr_chain *chain = &card->tx_chain;
	struct spider_net_descr *descr;
	struct spider_net_hw_descr *hwdescr;
	struct sk_buff *skb;
	u32 buf_addr;
	unsigned long flags;
	int status;

	while (1) {
		spin_lock_irqsave(&chain->lock, flags);
		if (chain->tail == chain->head) {
			spin_unlock_irqrestore(&chain->lock, flags);
			return 0;
		}
		descr = chain->tail;
		hwdescr = descr->hwdescr;

		status = spider_net_get_descr_status(hwdescr);
		switch (status) {
		case SPIDER_NET_DESCR_COMPLETE:
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += descr->skb->len;
			break;

		case SPIDER_NET_DESCR_CARDOWNED:
			if (!brutal) {
				spin_unlock_irqrestore(&chain->lock, flags);
				return 1;
			}

			/* fallthrough, if we release the descriptors
			 * brutally (then we don't care about
			 * SPIDER_NET_DESCR_CARDOWNED) */

		case SPIDER_NET_DESCR_RESPONSE_ERROR:
		case SPIDER_NET_DESCR_PROTECTION_ERROR:
		case SPIDER_NET_DESCR_FORCE_END:
			if (netif_msg_tx_err(card))
				dev_err(&card->netdev->dev, "forcing end of tx descriptor "
					"with status x%02x\n", status);
			dev->stats.tx_errors++;
			break;

		default:
			dev->stats.tx_dropped++;
			if (!brutal) {
				spin_unlock_irqrestore(&chain->lock, flags);
				return 1;
			}
		}

		chain->tail = descr->next;
		hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
		skb = descr->skb;
		descr->skb = NULL;
		buf_addr = hwdescr->buf_addr;
		spin_unlock_irqrestore(&chain->lock, flags);

		/* unmap the skb */
		if (skb) {
			pci_unmap_single(card->pdev, buf_addr, skb->len,
					PCI_DMA_TODEVICE);
			dev_consume_skb_any(skb);
		}
	}
	return 0;
}

/**
 * spider_net_kick_tx_dma - enables TX DMA processing
 * @card: card structure
 *
 * This routine will start the transmit DMA running if
 * it is not already running. This routine need only be
 * called when queueing a new packet to an empty tx queue.
 * Writes the bus address of the first card-owned tx descriptor
 * (searched from the chain tail) as start address of the tx
 * descriptor chain and enables the transmission DMA engine.
 */
static inline void
spider_net_kick_tx_dma(struct spider_net_card *card)
{
	struct spider_net_descr *descr;

	if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
			SPIDER_NET_TX_DMA_EN)
		goto out;

	descr = card->tx_chain.tail;
	for (;;) {
		if (spider_net_get_descr_status(descr->hwdescr) ==
				SPIDER_NET_DESCR_CARDOWNED) {
			spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
					descr->bus_addr);
			spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
					SPIDER_NET_DMA_TX_VALUE);
			break;
		}
		if (descr == card->tx_chain.head)
			break;
		descr = descr->next;
	}

out:
	mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
}

/**
 * spider_net_xmit - transmits a frame over the device
 * @skb: packet to send out
 * @netdev: interface device structure
 *
 * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY if no tx descriptor
 * could be prepared
 */
static int
spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int cnt;
	struct spider_net_card *card = netdev_priv(netdev);

	spider_net_release_tx_chain(card, 0);

	if (spider_net_prepare_tx_descr(card, skb) != 0) {
		netdev->stats.tx_dropped++;
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	cnt = spider_net_set_low_watermark(card);
	if (cnt < 5)
		spider_net_kick_tx_dma(card);
	return NETDEV_TX_OK;
}

/**
 * spider_net_cleanup_tx_ring - cleans up the TX ring
 * @card: card structure
 *
 * spider_net_cleanup_tx_ring is called by either the tx_timer
 * or from the NAPI polling routine.
 * This routine releases resources associated with transmitted
 * packets, including updating the queue tail pointer.
 */
static void
spider_net_cleanup_tx_ring(struct spider_net_card *card)
{
	if ((spider_net_release_tx_chain(card, 0) != 0) &&
	    (card->netdev->flags & IFF_UP)) {
		spider_net_kick_tx_dma(card);
		netif_wake_queue(card->netdev);
	}
}

/**
 * spider_net_do_ioctl - called for device ioctls
 * @netdev: interface device structure
 * @ifr: request parameter structure for ioctl
 * @cmd: command code for ioctl
 *
 * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
 * -EOPNOTSUPP is returned if an unknown ioctl was requested.
 */
static int
spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
 * @descr: descriptor to process
 * @card: card structure
 *
 * Fills out skb structure and passes the data to the stack.
 * The descriptor state is not changed.
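 *
 * The hardware prepends two bytes of padding in front of the Ethernet
 * header, so the data pointer is advanced by SPIDER_MISALIGN before the
 * protocol is resolved. If RX checksum offload is enabled and the
 * descriptor reports a valid TCP/UDP checksum, the skb is marked
 * CHECKSUM_UNNECESSARY; otherwise it stays CHECKSUM_NONE.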
949 */ 950 static void 951 spider_net_pass_skb_up(struct spider_net_descr *descr, 952 struct spider_net_card *card) 953 { 954 struct spider_net_hw_descr *hwdescr = descr->hwdescr; 955 struct sk_buff *skb = descr->skb; 956 struct net_device *netdev = card->netdev; 957 u32 data_status = hwdescr->data_status; 958 u32 data_error = hwdescr->data_error; 959 960 skb_put(skb, hwdescr->valid_size); 961 962 /* the card seems to add 2 bytes of junk in front 963 * of the ethernet frame */ 964 #define SPIDER_MISALIGN 2 965 skb_pull(skb, SPIDER_MISALIGN); 966 skb->protocol = eth_type_trans(skb, netdev); 967 968 /* checksum offload */ 969 skb_checksum_none_assert(skb); 970 if (netdev->features & NETIF_F_RXCSUM) { 971 if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) == 972 SPIDER_NET_DATA_STATUS_CKSUM_MASK) && 973 !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK)) 974 skb->ip_summed = CHECKSUM_UNNECESSARY; 975 } 976 977 if (data_status & SPIDER_NET_VLAN_PACKET) { 978 /* further enhancements: HW-accel VLAN */ 979 } 980 981 /* update netdevice statistics */ 982 netdev->stats.rx_packets++; 983 netdev->stats.rx_bytes += skb->len; 984 985 /* pass skb up to stack */ 986 netif_receive_skb(skb); 987 } 988 989 static void show_rx_chain(struct spider_net_card *card) 990 { 991 struct spider_net_descr_chain *chain = &card->rx_chain; 992 struct spider_net_descr *start= chain->tail; 993 struct spider_net_descr *descr= start; 994 struct spider_net_hw_descr *hwd = start->hwdescr; 995 struct device *dev = &card->netdev->dev; 996 u32 curr_desc, next_desc; 997 int status; 998 999 int tot = 0; 1000 int cnt = 0; 1001 int off = start - chain->ring; 1002 int cstat = hwd->dmac_cmd_status; 1003 1004 dev_info(dev, "Total number of descrs=%d\n", 1005 chain->num_desc); 1006 dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n", 1007 off, cstat); 1008 1009 curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA); 1010 next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA); 1011 1012 status = cstat; 1013 do 1014 { 1015 hwd = descr->hwdescr; 1016 off = descr - chain->ring; 1017 status = hwd->dmac_cmd_status; 1018 1019 if (descr == chain->head) 1020 dev_info(dev, "Chain head is at %d, head status=0x%x\n", 1021 off, status); 1022 1023 if (curr_desc == descr->bus_addr) 1024 dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n", 1025 off, status); 1026 1027 if (next_desc == descr->bus_addr) 1028 dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n", 1029 off, status); 1030 1031 if (hwd->next_descr_addr == 0) 1032 dev_info(dev, "chain is cut at %d\n", off); 1033 1034 if (cstat != status) { 1035 int from = (chain->num_desc + off - cnt) % chain->num_desc; 1036 int to = (chain->num_desc + off - 1) % chain->num_desc; 1037 dev_info(dev, "Have %d (from %d to %d) descrs " 1038 "with stat=0x%08x\n", cnt, from, to, cstat); 1039 cstat = status; 1040 cnt = 0; 1041 } 1042 1043 cnt ++; 1044 tot ++; 1045 descr = descr->next; 1046 } while (descr != start); 1047 1048 dev_info(dev, "Last %d descrs with stat=0x%08x " 1049 "for a total of %d descrs\n", cnt, cstat, tot); 1050 1051 #ifdef DEBUG 1052 /* Now dump the whole ring */ 1053 descr = start; 1054 do 1055 { 1056 struct spider_net_hw_descr *hwd = descr->hwdescr; 1057 status = spider_net_get_descr_status(hwd); 1058 cnt = descr - chain->ring; 1059 dev_info(dev, "Descr %d stat=0x%08x skb=%p\n", 1060 cnt, status, descr->skb); 1061 dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n", 1062 descr->bus_addr, hwd->buf_addr, hwd->buf_size); 1063 dev_info(dev, "next=%08x 
result sz=%d valid sz=%d\n",
			 hwd->next_descr_addr, hwd->result_size,
			 hwd->valid_size);
		dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
			 hwd->dmac_cmd_status, hwd->data_status,
			 hwd->data_error);
		dev_info(dev, "\n");

		descr = descr->next;
	} while (descr != start);
#endif

}

/**
 * spider_net_resync_head_ptr - Advance head ptr past empty descrs
 * @card: card structure
 *
 * If the driver fails to keep up and empty the queue, then the
 * hardware will run out of room to put incoming packets. This
 * will cause the hardware to skip descrs that are full (instead
 * of halting/retrying). Thus, once the driver runs, it will need
 * to "catch up" to where the hardware chain pointer is at.
 */
static void spider_net_resync_head_ptr(struct spider_net_card *card)
{
	unsigned long flags;
	struct spider_net_descr_chain *chain = &card->rx_chain;
	struct spider_net_descr *descr;
	int i, status;

	/* Advance head pointer past any empty descrs */
	descr = chain->head;
	status = spider_net_get_descr_status(descr->hwdescr);

	if (status == SPIDER_NET_DESCR_NOT_IN_USE)
		return;

	spin_lock_irqsave(&chain->lock, flags);

	descr = chain->head;
	status = spider_net_get_descr_status(descr->hwdescr);
	for (i = 0; i < chain->num_desc; i++) {
		if (status != SPIDER_NET_DESCR_CARDOWNED)
			break;
		descr = descr->next;
		status = spider_net_get_descr_status(descr->hwdescr);
	}
	chain->head = descr;

	spin_unlock_irqrestore(&chain->lock, flags);
}

static int spider_net_resync_tail_ptr(struct spider_net_card *card)
{
	struct spider_net_descr_chain *chain = &card->rx_chain;
	struct spider_net_descr *descr;
	int i, status;

	/* Advance tail pointer past any empty and reaped descrs */
	descr = chain->tail;
	status = spider_net_get_descr_status(descr->hwdescr);

	for (i = 0; i < chain->num_desc; i++) {
		if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
		    (status != SPIDER_NET_DESCR_NOT_IN_USE))
			break;
		descr = descr->next;
		status = spider_net_get_descr_status(descr->hwdescr);
	}
	chain->tail = descr;

	if ((i == chain->num_desc) || (i == 0))
		return 1;
	return 0;
}

/**
 * spider_net_decode_one_descr - processes an RX descriptor
 * @card: card structure
 *
 * Returns 1 if a packet has been sent to the stack, otherwise 0.
 *
 * Processes an RX descriptor by iommu-unmapping the data buffer
 * and passing the packet up to the stack. This function is called
 * in softirq context, e.g. either bottom half from interrupt or
 * NAPI polling context.
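 *
 * The chain tail is advanced and the buffer iommu-unmapped before the
 * status is inspected. Descriptors in an error state or with destroy
 * flags set are dropped and their skbs freed; good frames are handed to
 * spider_net_pass_skb_up() and the descriptor is marked
 * SPIDER_NET_DESCR_NOT_IN_USE so it can be refilled later.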
1147 */ 1148 static int 1149 spider_net_decode_one_descr(struct spider_net_card *card) 1150 { 1151 struct net_device *dev = card->netdev; 1152 struct spider_net_descr_chain *chain = &card->rx_chain; 1153 struct spider_net_descr *descr = chain->tail; 1154 struct spider_net_hw_descr *hwdescr = descr->hwdescr; 1155 u32 hw_buf_addr; 1156 int status; 1157 1158 status = spider_net_get_descr_status(hwdescr); 1159 1160 /* Nothing in the descriptor, or ring must be empty */ 1161 if ((status == SPIDER_NET_DESCR_CARDOWNED) || 1162 (status == SPIDER_NET_DESCR_NOT_IN_USE)) 1163 return 0; 1164 1165 /* descriptor definitively used -- move on tail */ 1166 chain->tail = descr->next; 1167 1168 /* unmap descriptor */ 1169 hw_buf_addr = hwdescr->buf_addr; 1170 hwdescr->buf_addr = 0xffffffff; 1171 pci_unmap_single(card->pdev, hw_buf_addr, 1172 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); 1173 1174 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) || 1175 (status == SPIDER_NET_DESCR_PROTECTION_ERROR) || 1176 (status == SPIDER_NET_DESCR_FORCE_END) ) { 1177 if (netif_msg_rx_err(card)) 1178 dev_err(&dev->dev, 1179 "dropping RX descriptor with state %d\n", status); 1180 dev->stats.rx_dropped++; 1181 goto bad_desc; 1182 } 1183 1184 if ( (status != SPIDER_NET_DESCR_COMPLETE) && 1185 (status != SPIDER_NET_DESCR_FRAME_END) ) { 1186 if (netif_msg_rx_err(card)) 1187 dev_err(&card->netdev->dev, 1188 "RX descriptor with unknown state %d\n", status); 1189 card->spider_stats.rx_desc_unk_state++; 1190 goto bad_desc; 1191 } 1192 1193 /* The cases we'll throw away the packet immediately */ 1194 if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) { 1195 if (netif_msg_rx_err(card)) 1196 dev_err(&card->netdev->dev, 1197 "error in received descriptor found, " 1198 "data_status=x%08x, data_error=x%08x\n", 1199 hwdescr->data_status, hwdescr->data_error); 1200 goto bad_desc; 1201 } 1202 1203 if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) { 1204 dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n", 1205 hwdescr->dmac_cmd_status); 1206 pr_err("buf_addr=x%08x\n", hw_buf_addr); 1207 pr_err("buf_size=x%08x\n", hwdescr->buf_size); 1208 pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr); 1209 pr_err("result_size=x%08x\n", hwdescr->result_size); 1210 pr_err("valid_size=x%08x\n", hwdescr->valid_size); 1211 pr_err("data_status=x%08x\n", hwdescr->data_status); 1212 pr_err("data_error=x%08x\n", hwdescr->data_error); 1213 pr_err("which=%ld\n", descr - card->rx_chain.ring); 1214 1215 card->spider_stats.rx_desc_error++; 1216 goto bad_desc; 1217 } 1218 1219 /* Ok, we've got a packet in descr */ 1220 spider_net_pass_skb_up(descr, card); 1221 descr->skb = NULL; 1222 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 1223 return 1; 1224 1225 bad_desc: 1226 if (netif_msg_rx_err(card)) 1227 show_rx_chain(card); 1228 dev_kfree_skb_irq(descr->skb); 1229 descr->skb = NULL; 1230 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 1231 return 0; 1232 } 1233 1234 /** 1235 * spider_net_poll - NAPI poll function called by the stack to return packets 1236 * @netdev: interface device structure 1237 * @budget: number of packets we can pass to the stack at most 1238 * 1239 * returns 0 if no more packets available to the driver/stack. Returns 1, 1240 * if the quota is exceeded, but the driver has still packets. 1241 * 1242 * spider_net_poll returns all packets from the rx descriptors to the stack 1243 * (using netif_receive_skb). If all/enough packets are up, the driver 1244 * reenables interrupts and returns 0. 
If not, 1 is returned. 1245 */ 1246 static int spider_net_poll(struct napi_struct *napi, int budget) 1247 { 1248 struct spider_net_card *card = container_of(napi, struct spider_net_card, napi); 1249 int packets_done = 0; 1250 1251 while (packets_done < budget) { 1252 if (!spider_net_decode_one_descr(card)) 1253 break; 1254 1255 packets_done++; 1256 } 1257 1258 if ((packets_done == 0) && (card->num_rx_ints != 0)) { 1259 if (!spider_net_resync_tail_ptr(card)) 1260 packets_done = budget; 1261 spider_net_resync_head_ptr(card); 1262 } 1263 card->num_rx_ints = 0; 1264 1265 spider_net_refill_rx_chain(card); 1266 spider_net_enable_rxdmac(card); 1267 1268 spider_net_cleanup_tx_ring(card); 1269 1270 /* if all packets are in the stack, enable interrupts and return 0 */ 1271 /* if not, return 1 */ 1272 if (packets_done < budget) { 1273 napi_complete(napi); 1274 spider_net_rx_irq_on(card); 1275 card->ignore_rx_ramfull = 0; 1276 } 1277 1278 return packets_done; 1279 } 1280 1281 /** 1282 * spider_net_change_mtu - changes the MTU of an interface 1283 * @netdev: interface device structure 1284 * @new_mtu: new MTU value 1285 * 1286 * returns 0 on success, <0 on failure 1287 */ 1288 static int 1289 spider_net_change_mtu(struct net_device *netdev, int new_mtu) 1290 { 1291 /* no need to re-alloc skbs or so -- the max mtu is about 2.3k 1292 * and mtu is outbound only anyway */ 1293 if ( (new_mtu < SPIDER_NET_MIN_MTU ) || 1294 (new_mtu > SPIDER_NET_MAX_MTU) ) 1295 return -EINVAL; 1296 netdev->mtu = new_mtu; 1297 return 0; 1298 } 1299 1300 /** 1301 * spider_net_set_mac - sets the MAC of an interface 1302 * @netdev: interface device structure 1303 * @ptr: pointer to new MAC address 1304 * 1305 * Returns 0 on success, <0 on failure. Currently, we don't support this 1306 * and will always return EOPNOTSUPP. 1307 */ 1308 static int 1309 spider_net_set_mac(struct net_device *netdev, void *p) 1310 { 1311 struct spider_net_card *card = netdev_priv(netdev); 1312 u32 macl, macu, regvalue; 1313 struct sockaddr *addr = p; 1314 1315 if (!is_valid_ether_addr(addr->sa_data)) 1316 return -EADDRNOTAVAIL; 1317 1318 memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN); 1319 1320 /* switch off GMACTPE and GMACRPE */ 1321 regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD); 1322 regvalue &= ~((1 << 5) | (1 << 6)); 1323 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue); 1324 1325 /* write mac */ 1326 macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) + 1327 (netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]); 1328 macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]); 1329 spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu); 1330 spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl); 1331 1332 /* switch GMACTPE and GMACRPE back on */ 1333 regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD); 1334 regvalue |= ((1 << 5) | (1 << 6)); 1335 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue); 1336 1337 spider_net_set_promisc(card); 1338 1339 return 0; 1340 } 1341 1342 /** 1343 * spider_net_link_reset 1344 * @netdev: net device structure 1345 * 1346 * This is called when the PHY_LINK signal is asserted. For the blade this is 1347 * not connected so we should never get here. 
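 * Where the signal is connected, the handler acknowledges and masks the
 * GMAC interrupts (GMACST/GMACINTEN), then restarts autonegotiation from
 * copper via spider_net_setup_aneg() and re-arms the aneg timer.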
1348 * 1349 */ 1350 static void 1351 spider_net_link_reset(struct net_device *netdev) 1352 { 1353 1354 struct spider_net_card *card = netdev_priv(netdev); 1355 1356 del_timer_sync(&card->aneg_timer); 1357 1358 /* clear interrupt, block further interrupts */ 1359 spider_net_write_reg(card, SPIDER_NET_GMACST, 1360 spider_net_read_reg(card, SPIDER_NET_GMACST)); 1361 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0); 1362 1363 /* reset phy and setup aneg */ 1364 card->aneg_count = 0; 1365 card->medium = BCM54XX_COPPER; 1366 spider_net_setup_aneg(card); 1367 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); 1368 1369 } 1370 1371 /** 1372 * spider_net_handle_error_irq - handles errors raised by an interrupt 1373 * @card: card structure 1374 * @status_reg: interrupt status register 0 (GHIINT0STS) 1375 * 1376 * spider_net_handle_error_irq treats or ignores all error conditions 1377 * found when an interrupt is presented 1378 */ 1379 static void 1380 spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, 1381 u32 error_reg1, u32 error_reg2) 1382 { 1383 u32 i; 1384 int show_error = 1; 1385 1386 /* check GHIINT0STS ************************************/ 1387 if (status_reg) 1388 for (i = 0; i < 32; i++) 1389 if (status_reg & (1<<i)) 1390 switch (i) 1391 { 1392 /* let error_reg1 and error_reg2 evaluation decide, what to do 1393 case SPIDER_NET_PHYINT: 1394 case SPIDER_NET_GMAC2INT: 1395 case SPIDER_NET_GMAC1INT: 1396 case SPIDER_NET_GFIFOINT: 1397 case SPIDER_NET_DMACINT: 1398 case SPIDER_NET_GSYSINT: 1399 break; */ 1400 1401 case SPIDER_NET_GIPSINT: 1402 show_error = 0; 1403 break; 1404 1405 case SPIDER_NET_GPWOPCMPINT: 1406 /* PHY write operation completed */ 1407 show_error = 0; 1408 break; 1409 case SPIDER_NET_GPROPCMPINT: 1410 /* PHY read operation completed */ 1411 /* we don't use semaphores, as we poll for the completion 1412 * of the read operation in spider_net_read_phy. Should take 1413 * about 50 us */ 1414 show_error = 0; 1415 break; 1416 case SPIDER_NET_GPWFFINT: 1417 /* PHY command queue full */ 1418 if (netif_msg_intr(card)) 1419 dev_err(&card->netdev->dev, "PHY write queue full\n"); 1420 show_error = 0; 1421 break; 1422 1423 /* case SPIDER_NET_GRMDADRINT: not used. print a message */ 1424 /* case SPIDER_NET_GRMARPINT: not used. print a message */ 1425 /* case SPIDER_NET_GRMMPINT: not used. print a message */ 1426 1427 case SPIDER_NET_GDTDEN0INT: 1428 /* someone has set TX_DMA_EN to 0 */ 1429 show_error = 0; 1430 break; 1431 1432 case SPIDER_NET_GDDDEN0INT: /* fallthrough */ 1433 case SPIDER_NET_GDCDEN0INT: /* fallthrough */ 1434 case SPIDER_NET_GDBDEN0INT: /* fallthrough */ 1435 case SPIDER_NET_GDADEN0INT: 1436 /* someone has set RX_DMA_EN to 0 */ 1437 show_error = 0; 1438 break; 1439 1440 /* RX interrupts */ 1441 case SPIDER_NET_GDDFDCINT: 1442 case SPIDER_NET_GDCFDCINT: 1443 case SPIDER_NET_GDBFDCINT: 1444 case SPIDER_NET_GDAFDCINT: 1445 /* case SPIDER_NET_GDNMINT: not used. print a message */ 1446 /* case SPIDER_NET_GCNMINT: not used. print a message */ 1447 /* case SPIDER_NET_GBNMINT: not used. print a message */ 1448 /* case SPIDER_NET_GANMINT: not used. print a message */ 1449 /* case SPIDER_NET_GRFNMINT: not used. print a message */ 1450 show_error = 0; 1451 break; 1452 1453 /* TX interrupts */ 1454 case SPIDER_NET_GDTFDCINT: 1455 show_error = 0; 1456 break; 1457 case SPIDER_NET_GTTEDINT: 1458 show_error = 0; 1459 break; 1460 case SPIDER_NET_GDTDCEINT: 1461 /* chain end. 
If a descriptor should be sent, kick off 1462 * tx dma 1463 if (card->tx_chain.tail != card->tx_chain.head) 1464 spider_net_kick_tx_dma(card); 1465 */ 1466 show_error = 0; 1467 break; 1468 1469 /* case SPIDER_NET_G1TMCNTINT: not used. print a message */ 1470 /* case SPIDER_NET_GFREECNTINT: not used. print a message */ 1471 } 1472 1473 /* check GHIINT1STS ************************************/ 1474 if (error_reg1) 1475 for (i = 0; i < 32; i++) 1476 if (error_reg1 & (1<<i)) 1477 switch (i) 1478 { 1479 case SPIDER_NET_GTMFLLINT: 1480 /* TX RAM full may happen on a usual case. 1481 * Logging is not needed. */ 1482 show_error = 0; 1483 break; 1484 case SPIDER_NET_GRFDFLLINT: /* fallthrough */ 1485 case SPIDER_NET_GRFCFLLINT: /* fallthrough */ 1486 case SPIDER_NET_GRFBFLLINT: /* fallthrough */ 1487 case SPIDER_NET_GRFAFLLINT: /* fallthrough */ 1488 case SPIDER_NET_GRMFLLINT: 1489 /* Could happen when rx chain is full */ 1490 if (card->ignore_rx_ramfull == 0) { 1491 card->ignore_rx_ramfull = 1; 1492 spider_net_resync_head_ptr(card); 1493 spider_net_refill_rx_chain(card); 1494 spider_net_enable_rxdmac(card); 1495 card->num_rx_ints ++; 1496 napi_schedule(&card->napi); 1497 } 1498 show_error = 0; 1499 break; 1500 1501 /* case SPIDER_NET_GTMSHTINT: problem, print a message */ 1502 case SPIDER_NET_GDTINVDINT: 1503 /* allrighty. tx from previous descr ok */ 1504 show_error = 0; 1505 break; 1506 1507 /* chain end */ 1508 case SPIDER_NET_GDDDCEINT: /* fallthrough */ 1509 case SPIDER_NET_GDCDCEINT: /* fallthrough */ 1510 case SPIDER_NET_GDBDCEINT: /* fallthrough */ 1511 case SPIDER_NET_GDADCEINT: 1512 spider_net_resync_head_ptr(card); 1513 spider_net_refill_rx_chain(card); 1514 spider_net_enable_rxdmac(card); 1515 card->num_rx_ints ++; 1516 napi_schedule(&card->napi); 1517 show_error = 0; 1518 break; 1519 1520 /* invalid descriptor */ 1521 case SPIDER_NET_GDDINVDINT: /* fallthrough */ 1522 case SPIDER_NET_GDCINVDINT: /* fallthrough */ 1523 case SPIDER_NET_GDBINVDINT: /* fallthrough */ 1524 case SPIDER_NET_GDAINVDINT: 1525 /* Could happen when rx chain is full */ 1526 spider_net_resync_head_ptr(card); 1527 spider_net_refill_rx_chain(card); 1528 spider_net_enable_rxdmac(card); 1529 card->num_rx_ints ++; 1530 napi_schedule(&card->napi); 1531 show_error = 0; 1532 break; 1533 1534 /* case SPIDER_NET_GDTRSERINT: problem, print a message */ 1535 /* case SPIDER_NET_GDDRSERINT: problem, print a message */ 1536 /* case SPIDER_NET_GDCRSERINT: problem, print a message */ 1537 /* case SPIDER_NET_GDBRSERINT: problem, print a message */ 1538 /* case SPIDER_NET_GDARSERINT: problem, print a message */ 1539 /* case SPIDER_NET_GDSERINT: problem, print a message */ 1540 /* case SPIDER_NET_GDTPTERINT: problem, print a message */ 1541 /* case SPIDER_NET_GDDPTERINT: problem, print a message */ 1542 /* case SPIDER_NET_GDCPTERINT: problem, print a message */ 1543 /* case SPIDER_NET_GDBPTERINT: problem, print a message */ 1544 /* case SPIDER_NET_GDAPTERINT: problem, print a message */ 1545 default: 1546 show_error = 1; 1547 break; 1548 } 1549 1550 /* check GHIINT2STS ************************************/ 1551 if (error_reg2) 1552 for (i = 0; i < 32; i++) 1553 if (error_reg2 & (1<<i)) 1554 switch (i) 1555 { 1556 /* there is nothing we can (want to) do at this time. 
Log a 1557 * message, we can switch on and off the specific values later on 1558 case SPIDER_NET_GPROPERINT: 1559 case SPIDER_NET_GMCTCRSNGINT: 1560 case SPIDER_NET_GMCTLCOLINT: 1561 case SPIDER_NET_GMCTTMOTINT: 1562 case SPIDER_NET_GMCRCAERINT: 1563 case SPIDER_NET_GMCRCALERINT: 1564 case SPIDER_NET_GMCRALNERINT: 1565 case SPIDER_NET_GMCROVRINT: 1566 case SPIDER_NET_GMCRRNTINT: 1567 case SPIDER_NET_GMCRRXERINT: 1568 case SPIDER_NET_GTITCSERINT: 1569 case SPIDER_NET_GTIFMTERINT: 1570 case SPIDER_NET_GTIPKTRVKINT: 1571 case SPIDER_NET_GTISPINGINT: 1572 case SPIDER_NET_GTISADNGINT: 1573 case SPIDER_NET_GTISPDNGINT: 1574 case SPIDER_NET_GRIFMTERINT: 1575 case SPIDER_NET_GRIPKTRVKINT: 1576 case SPIDER_NET_GRISPINGINT: 1577 case SPIDER_NET_GRISADNGINT: 1578 case SPIDER_NET_GRISPDNGINT: 1579 break; 1580 */ 1581 default: 1582 break; 1583 } 1584 1585 if ((show_error) && (netif_msg_intr(card)) && net_ratelimit()) 1586 dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, " 1587 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n", 1588 status_reg, error_reg1, error_reg2); 1589 1590 /* clear interrupt sources */ 1591 spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1); 1592 spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2); 1593 } 1594 1595 /** 1596 * spider_net_interrupt - interrupt handler for spider_net 1597 * @irq: interrupt number 1598 * @ptr: pointer to net_device 1599 * 1600 * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no 1601 * interrupt found raised by card. 1602 * 1603 * This is the interrupt handler, that turns off 1604 * interrupts for this device and makes the stack poll the driver 1605 */ 1606 static irqreturn_t 1607 spider_net_interrupt(int irq, void *ptr) 1608 { 1609 struct net_device *netdev = ptr; 1610 struct spider_net_card *card = netdev_priv(netdev); 1611 u32 status_reg, error_reg1, error_reg2; 1612 1613 status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS); 1614 error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS); 1615 error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS); 1616 1617 if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) && 1618 !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) && 1619 !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE)) 1620 return IRQ_NONE; 1621 1622 if (status_reg & SPIDER_NET_RXINT ) { 1623 spider_net_rx_irq_off(card); 1624 napi_schedule(&card->napi); 1625 card->num_rx_ints ++; 1626 } 1627 if (status_reg & SPIDER_NET_TXINT) 1628 napi_schedule(&card->napi); 1629 1630 if (status_reg & SPIDER_NET_LINKINT) 1631 spider_net_link_reset(netdev); 1632 1633 if (status_reg & SPIDER_NET_ERRINT ) 1634 spider_net_handle_error_irq(card, status_reg, 1635 error_reg1, error_reg2); 1636 1637 /* clear interrupt sources */ 1638 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg); 1639 1640 return IRQ_HANDLED; 1641 } 1642 1643 #ifdef CONFIG_NET_POLL_CONTROLLER 1644 /** 1645 * spider_net_poll_controller - artificial interrupt for netconsole etc. 
1646 * @netdev: interface device structure 1647 * 1648 * see Documentation/networking/netconsole.txt 1649 */ 1650 static void 1651 spider_net_poll_controller(struct net_device *netdev) 1652 { 1653 disable_irq(netdev->irq); 1654 spider_net_interrupt(netdev->irq, netdev); 1655 enable_irq(netdev->irq); 1656 } 1657 #endif /* CONFIG_NET_POLL_CONTROLLER */ 1658 1659 /** 1660 * spider_net_enable_interrupts - enable interrupts 1661 * @card: card structure 1662 * 1663 * spider_net_enable_interrupt enables several interrupts 1664 */ 1665 static void 1666 spider_net_enable_interrupts(struct spider_net_card *card) 1667 { 1668 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 1669 SPIDER_NET_INT0_MASK_VALUE); 1670 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 1671 SPIDER_NET_INT1_MASK_VALUE); 1672 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 1673 SPIDER_NET_INT2_MASK_VALUE); 1674 } 1675 1676 /** 1677 * spider_net_disable_interrupts - disable interrupts 1678 * @card: card structure 1679 * 1680 * spider_net_disable_interrupts disables all the interrupts 1681 */ 1682 static void 1683 spider_net_disable_interrupts(struct spider_net_card *card) 1684 { 1685 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); 1686 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0); 1687 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); 1688 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0); 1689 } 1690 1691 /** 1692 * spider_net_init_card - initializes the card 1693 * @card: card structure 1694 * 1695 * spider_net_init_card initializes the card so that other registers can 1696 * be used 1697 */ 1698 static void 1699 spider_net_init_card(struct spider_net_card *card) 1700 { 1701 spider_net_write_reg(card, SPIDER_NET_CKRCTRL, 1702 SPIDER_NET_CKRCTRL_STOP_VALUE); 1703 1704 spider_net_write_reg(card, SPIDER_NET_CKRCTRL, 1705 SPIDER_NET_CKRCTRL_RUN_VALUE); 1706 1707 /* trigger ETOMOD signal */ 1708 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, 1709 spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4); 1710 1711 spider_net_disable_interrupts(card); 1712 } 1713 1714 /** 1715 * spider_net_enable_card - enables the card by setting all kinds of regs 1716 * @card: card structure 1717 * 1718 * spider_net_enable_card sets a lot of SMMIO registers to enable the device 1719 */ 1720 static void 1721 spider_net_enable_card(struct spider_net_card *card) 1722 { 1723 int i; 1724 /* the following array consists of (register),(value) pairs 1725 * that are set in this function. 
A register of 0 ends the list */ 1726 u32 regs[][2] = { 1727 { SPIDER_NET_GRESUMINTNUM, 0 }, 1728 { SPIDER_NET_GREINTNUM, 0 }, 1729 1730 /* set interrupt frame number registers */ 1731 /* clear the single DMA engine registers first */ 1732 { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE }, 1733 { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE }, 1734 { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE }, 1735 { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE }, 1736 /* then set, what we really need */ 1737 { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE }, 1738 1739 /* timer counter registers and stuff */ 1740 { SPIDER_NET_GFREECNNUM, 0 }, 1741 { SPIDER_NET_GONETIMENUM, 0 }, 1742 { SPIDER_NET_GTOUTFRMNUM, 0 }, 1743 1744 /* RX mode setting */ 1745 { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE }, 1746 /* TX mode setting */ 1747 { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE }, 1748 /* IPSEC mode setting */ 1749 { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE }, 1750 1751 { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE }, 1752 1753 { SPIDER_NET_GMRWOLCTRL, 0 }, 1754 { SPIDER_NET_GTESTMD, 0x10000000 }, 1755 { SPIDER_NET_GTTQMSK, 0x00400040 }, 1756 1757 { SPIDER_NET_GMACINTEN, 0 }, 1758 1759 /* flow control stuff */ 1760 { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE }, 1761 { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE }, 1762 1763 { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE }, 1764 { 0, 0} 1765 }; 1766 1767 i = 0; 1768 while (regs[i][0]) { 1769 spider_net_write_reg(card, regs[i][0], regs[i][1]); 1770 i++; 1771 } 1772 1773 /* clear unicast filter table entries 1 to 14 */ 1774 for (i = 1; i <= 14; i++) { 1775 spider_net_write_reg(card, 1776 SPIDER_NET_GMRUAFILnR + i * 8, 1777 0x00080000); 1778 spider_net_write_reg(card, 1779 SPIDER_NET_GMRUAFILnR + i * 8 + 4, 1780 0x00000000); 1781 } 1782 1783 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000); 1784 1785 spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE); 1786 1787 /* set chain tail address for RX chains and 1788 * enable DMA */ 1789 spider_net_enable_rxchtails(card); 1790 spider_net_enable_rxdmac(card); 1791 1792 spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE); 1793 1794 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT, 1795 SPIDER_NET_LENLMT_VALUE); 1796 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, 1797 SPIDER_NET_OPMODE_VALUE); 1798 1799 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 1800 SPIDER_NET_GDTBSTA); 1801 } 1802 1803 /** 1804 * spider_net_download_firmware - loads firmware into the adapter 1805 * @card: card structure 1806 * @firmware_ptr: pointer to firmware data 1807 * 1808 * spider_net_download_firmware loads the firmware data into the 1809 * adapter. It assumes the length etc. to be allright. 
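 *
 * The download below stops the sequencers through GSINIT, resets each
 * sequencer's program address register, writes SPIDER_NET_FIRMWARE_SEQWORDS
 * words to its program data register, then checks GSINIT and restarts
 * the sequencers.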
1802 
1803 /**
1804  * spider_net_download_firmware - loads firmware into the adapter
1805  * @card: card structure
1806  * @firmware_ptr: pointer to firmware data
1807  *
1808  * spider_net_download_firmware loads the firmware data into the
1809  * adapter. It assumes the length etc. are correct.
1810  */
1811 static int
1812 spider_net_download_firmware(struct spider_net_card *card,
1813 			     const void *firmware_ptr)
1814 {
1815 	int sequencer, i;
1816 	const u32 *fw_ptr = firmware_ptr;
1817 
1818 	/* stop sequencers */
1819 	spider_net_write_reg(card, SPIDER_NET_GSINIT,
1820 			     SPIDER_NET_STOP_SEQ_VALUE);
1821 
1822 	for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1823 	     sequencer++) {
1824 		spider_net_write_reg(card,
1825 				     SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
1826 		for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1827 			spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1828 					     sequencer * 8, *fw_ptr);
1829 			fw_ptr++;
1830 		}
1831 	}
1832 
1833 	if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
1834 		return -EIO;
1835 
1836 	spider_net_write_reg(card, SPIDER_NET_GSINIT,
1837 			     SPIDER_NET_RUN_SEQ_VALUE);
1838 
1839 	return 0;
1840 }
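/*
 * Layout note (follows from the loops in spider_net_download_firmware()
 * above): the firmware blob is consumed strictly sequentially, so word i
 * of sequencer s is expected at byte offset
 *
 *	(s * SPIDER_NET_FIRMWARE_SEQWORDS + i) * sizeof(u32)
 *
 * i.e. the per-sequencer programs are simply concatenated back to back,
 * which matches the "Firmware format" description below.
 */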
1841 
1842 /**
1843  * spider_net_init_firmware - reads in firmware parts
1844  * @card: card structure
1845  *
1846  * Returns 0 on success, <0 on failure
1847  *
1848  * spider_net_init_firmware opens the sequencer firmware and does some basic
1849  * checks. This function opens and releases the firmware structure. A call
1850  * to download the firmware is performed before the release.
1851  *
1852  * Firmware format
1853  * ===============
1854  * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
1855  * the program for each sequencer. Use the command
1856  *    tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
1857  *         Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
1858  *         Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
1859  *
1860  * to generate spider_fw.bin, if you have sequencer programs with something
1861  * like the following contents for each sequencer:
1862  *    <ONE LINE COMMENT>
1863  *    <FIRST 4-BYTES-WORD FOR SEQUENCER>
1864  *    <SECOND 4-BYTES-WORD FOR SEQUENCER>
1865  *    ...
1866  *    <1024th 4-BYTES-WORD FOR SEQUENCER>
1867  */
1868 static int
1869 spider_net_init_firmware(struct spider_net_card *card)
1870 {
1871 	struct firmware *firmware = NULL;
1872 	struct device_node *dn;
1873 	const u8 *fw_prop = NULL;
1874 	int err = -ENOENT;
1875 	int fw_size;
1876 
1877 	if (request_firmware((const struct firmware **)&firmware,
1878 			     SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
1879 		if (firmware->size != SPIDER_NET_FIRMWARE_LEN) {
1880 			if (netif_msg_probe(card))
1881 				dev_err(&card->netdev->dev,
1882 					"Incorrect size of spidernet firmware in " \
1883 					"filesystem. Looking in host firmware...\n");
1884 			release_firmware(firmware);
1885 			goto try_host_fw;
1886 		}
1887 		err = spider_net_download_firmware(card, firmware->data);
1888 		release_firmware(firmware);
1889 		if (err)
1890 			goto try_host_fw;
1891 
1892 		goto done;
1893 	}
1894 
1895 try_host_fw:
1896 	dn = pci_device_to_OF_node(card->pdev);
1897 	if (!dn)
1898 		goto out_err;
1899 
1900 	fw_prop = of_get_property(dn, "firmware", &fw_size);
1901 	if (!fw_prop)
1902 		goto out_err;
1903 
1904 	if (fw_size != SPIDER_NET_FIRMWARE_LEN) {
1905 		if (netif_msg_probe(card))
1906 			dev_err(&card->netdev->dev,
1907 				"Incorrect size of spidernet firmware in host firmware\n");
1908 		goto done;
1909 	}
1910 
1911 	err = spider_net_download_firmware(card, fw_prop);
1912 
1913 done:
1914 	return err;
1915 out_err:
1916 	if (netif_msg_probe(card))
1917 		dev_err(&card->netdev->dev,
1918 			"Couldn't find spidernet firmware in filesystem " \
1919 			"or host firmware\n");
1920 	return err;
1921 }
1922 
1923 /**
1924  * spider_net_open - called upon ifconfig up
1925  * @netdev: interface device structure
1926  *
1927  * returns 0 on success, <0 on failure
1928  *
1929  * spider_net_open allocates all the descriptors and memory needed for
1930  * operation, sets up multicast list and enables interrupts
1931  */
1932 int
1933 spider_net_open(struct net_device *netdev)
1934 {
1935 	struct spider_net_card *card = netdev_priv(netdev);
1936 	int result;
1937 
1938 	result = spider_net_init_firmware(card);
1939 	if (result)
1940 		goto init_firmware_failed;
1941 
1942 	/* start probing with copper */
1943 	card->aneg_count = 0;
1944 	card->medium = BCM54XX_COPPER;
1945 	spider_net_setup_aneg(card);
1946 	if (card->phy.def->phy_id)
1947 		mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1948 
1949 	result = spider_net_init_chain(card, &card->tx_chain);
1950 	if (result)
1951 		goto alloc_tx_failed;
1952 	card->low_watermark = NULL;
1953 
1954 	result = spider_net_init_chain(card, &card->rx_chain);
1955 	if (result)
1956 		goto alloc_rx_failed;
1957 
1958 	/* Allocate rx skbs */
1959 	result = spider_net_alloc_rx_skbs(card);
1960 	if (result)
1961 		goto alloc_skbs_failed;
1962 
1963 	spider_net_set_multi(netdev);
1964 
1965 	/* further enhancement: setup hw vlan, if needed */
1966 
1967 	result = -EBUSY;
1968 	if (request_irq(netdev->irq, spider_net_interrupt,
1969 			IRQF_SHARED, netdev->name, netdev))
1970 		goto register_int_failed;
1971 
1972 	spider_net_enable_card(card);
1973 
1974 	netif_start_queue(netdev);
1975 	netif_carrier_on(netdev);
1976 	napi_enable(&card->napi);
1977 
1978 	spider_net_enable_interrupts(card);
1979 
1980 	return 0;
1981 
1982 register_int_failed:
1983 	spider_net_free_rx_chain_contents(card);
1984 alloc_skbs_failed:
1985 	spider_net_free_chain(card, &card->rx_chain);
1986 alloc_rx_failed:
1987 	spider_net_free_chain(card, &card->tx_chain);
1988 alloc_tx_failed:
1989 	del_timer_sync(&card->aneg_timer);
1990 init_firmware_failed:
1991 	return result;
1992 }
1993 
1994 /**
1995  * spider_net_link_phy - keep track of the PHY link status (aneg_timer callback)
1996  * @data: used for pointer to card structure
1997  *
1998  */
1999 static void spider_net_link_phy(unsigned long data)
2000 {
2001 	struct spider_net_card *card = (struct spider_net_card *)data;
2002 	struct mii_phy *phy = &card->phy;
2003 
2004 	/* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
2005 	if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
2006 
2007 		pr_debug("%s: link is down trying to bring it up\n",
2008 			 card->netdev->name);
2009 
2010 		switch (card->medium) {
2011 		case BCM54XX_COPPER:
2012 			/* enable fiber with autonegotiation first */
2013 			if (phy->def->ops->enable_fiber)
2014
phy->def->ops->enable_fiber(phy, 1); 2015 card->medium = BCM54XX_FIBER; 2016 break; 2017 2018 case BCM54XX_FIBER: 2019 /* fiber didn't come up, try to disable fiber autoneg */ 2020 if (phy->def->ops->enable_fiber) 2021 phy->def->ops->enable_fiber(phy, 0); 2022 card->medium = BCM54XX_UNKNOWN; 2023 break; 2024 2025 case BCM54XX_UNKNOWN: 2026 /* copper, fiber with and without failed, 2027 * retry from beginning */ 2028 spider_net_setup_aneg(card); 2029 card->medium = BCM54XX_COPPER; 2030 break; 2031 } 2032 2033 card->aneg_count = 0; 2034 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); 2035 return; 2036 } 2037 2038 /* link still not up, try again later */ 2039 if (!(phy->def->ops->poll_link(phy))) { 2040 card->aneg_count++; 2041 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); 2042 return; 2043 } 2044 2045 /* link came up, get abilities */ 2046 phy->def->ops->read_link(phy); 2047 2048 spider_net_write_reg(card, SPIDER_NET_GMACST, 2049 spider_net_read_reg(card, SPIDER_NET_GMACST)); 2050 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4); 2051 2052 if (phy->speed == 1000) 2053 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001); 2054 else 2055 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0); 2056 2057 card->aneg_count = 0; 2058 2059 pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n", 2060 card->netdev->name, phy->speed, 2061 phy->duplex == 1 ? "Full" : "Half", 2062 phy->autoneg == 1 ? "" : "no "); 2063 } 2064 2065 /** 2066 * spider_net_setup_phy - setup PHY 2067 * @card: card structure 2068 * 2069 * returns 0 on success, <0 on failure 2070 * 2071 * spider_net_setup_phy is used as part of spider_net_probe. 2072 **/ 2073 static int 2074 spider_net_setup_phy(struct spider_net_card *card) 2075 { 2076 struct mii_phy *phy = &card->phy; 2077 2078 spider_net_write_reg(card, SPIDER_NET_GDTDMASEL, 2079 SPIDER_NET_DMASEL_VALUE); 2080 spider_net_write_reg(card, SPIDER_NET_GPCCTRL, 2081 SPIDER_NET_PHY_CTRL_VALUE); 2082 2083 phy->dev = card->netdev; 2084 phy->mdio_read = spider_net_read_phy; 2085 phy->mdio_write = spider_net_write_phy; 2086 2087 for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) { 2088 unsigned short id; 2089 id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR); 2090 if (id != 0x0000 && id != 0xffff) { 2091 if (!sungem_phy_probe(phy, phy->mii_id)) { 2092 pr_info("Found %s.\n", phy->def->name); 2093 break; 2094 } 2095 } 2096 } 2097 2098 return 0; 2099 } 2100 2101 /** 2102 * spider_net_workaround_rxramfull - work around firmware bug 2103 * @card: card structure 2104 * 2105 * no return value 2106 **/ 2107 static void 2108 spider_net_workaround_rxramfull(struct spider_net_card *card) 2109 { 2110 int i, sequencer = 0; 2111 2112 /* cancel reset */ 2113 spider_net_write_reg(card, SPIDER_NET_CKRCTRL, 2114 SPIDER_NET_CKRCTRL_RUN_VALUE); 2115 2116 /* empty sequencer data */ 2117 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS; 2118 sequencer++) { 2119 spider_net_write_reg(card, SPIDER_NET_GSnPRGADR + 2120 sequencer * 8, 0x0); 2121 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) { 2122 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 2123 sequencer * 8, 0x0); 2124 } 2125 } 2126 2127 /* set sequencer operation */ 2128 spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe); 2129 2130 /* reset */ 2131 spider_net_write_reg(card, SPIDER_NET_CKRCTRL, 2132 SPIDER_NET_CKRCTRL_STOP_VALUE); 2133 } 2134 2135 /** 2136 * spider_net_stop - called upon ifconfig down 2137 * @netdev: interface device structure 2138 * 2139 * always returns 0 
2140  */
2141 int
2142 spider_net_stop(struct net_device *netdev)
2143 {
2144 	struct spider_net_card *card = netdev_priv(netdev);
2145 
2146 	napi_disable(&card->napi);
2147 	netif_carrier_off(netdev);
2148 	netif_stop_queue(netdev);
2149 	del_timer_sync(&card->tx_timer);
2150 	del_timer_sync(&card->aneg_timer);
2151 
2152 	spider_net_disable_interrupts(card);
2153 
2154 	free_irq(netdev->irq, netdev);
2155 
2156 	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
2157 			     SPIDER_NET_DMA_TX_FEND_VALUE);
2158 
2159 	/* turn off DMA, force end */
2160 	spider_net_disable_rxdmac(card);
2161 
2162 	/* release chains */
2163 	spider_net_release_tx_chain(card, 1);
2164 	spider_net_free_rx_chain_contents(card);
2165 
2166 	spider_net_free_chain(card, &card->tx_chain);
2167 	spider_net_free_chain(card, &card->rx_chain);
2168 
2169 	return 0;
2170 }
2171 
2172 /**
2173  * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
2174  * function (runs in process context, not in interrupt context)
2175  * @work: the tx_timeout_task work_struct embedded in the card structure
2176  *
2177  * called as a task when tx hangs, resets the interface (if it is up)
2178  */
2179 static void
2180 spider_net_tx_timeout_task(struct work_struct *work)
2181 {
2182 	struct spider_net_card *card =
2183 		container_of(work, struct spider_net_card, tx_timeout_task);
2184 	struct net_device *netdev = card->netdev;
2185 
2186 	if (!(netdev->flags & IFF_UP))
2187 		goto out;
2188 
2189 	netif_device_detach(netdev);
2190 	spider_net_stop(netdev);
2191 
2192 	spider_net_workaround_rxramfull(card);
2193 	spider_net_init_card(card);
2194 
2195 	if (spider_net_setup_phy(card))
2196 		goto out;
2197 
2198 	spider_net_open(netdev);
2199 	spider_net_kick_tx_dma(card);
2200 	netif_device_attach(netdev);
2201 
2202 out:
2203 	atomic_dec(&card->tx_timeout_task_counter);
2204 }
2205 
2206 /**
2207  * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
2208  * @netdev: interface device structure
2209  *
2210  * called if tx hangs.
Schedules a task that resets the interface 2211 */ 2212 static void 2213 spider_net_tx_timeout(struct net_device *netdev) 2214 { 2215 struct spider_net_card *card; 2216 2217 card = netdev_priv(netdev); 2218 atomic_inc(&card->tx_timeout_task_counter); 2219 if (netdev->flags & IFF_UP) 2220 schedule_work(&card->tx_timeout_task); 2221 else 2222 atomic_dec(&card->tx_timeout_task_counter); 2223 card->spider_stats.tx_timeouts++; 2224 } 2225 2226 static const struct net_device_ops spider_net_ops = { 2227 .ndo_open = spider_net_open, 2228 .ndo_stop = spider_net_stop, 2229 .ndo_start_xmit = spider_net_xmit, 2230 .ndo_set_rx_mode = spider_net_set_multi, 2231 .ndo_set_mac_address = spider_net_set_mac, 2232 .ndo_change_mtu = spider_net_change_mtu, 2233 .ndo_do_ioctl = spider_net_do_ioctl, 2234 .ndo_tx_timeout = spider_net_tx_timeout, 2235 .ndo_validate_addr = eth_validate_addr, 2236 /* HW VLAN */ 2237 #ifdef CONFIG_NET_POLL_CONTROLLER 2238 /* poll controller */ 2239 .ndo_poll_controller = spider_net_poll_controller, 2240 #endif /* CONFIG_NET_POLL_CONTROLLER */ 2241 }; 2242 2243 /** 2244 * spider_net_setup_netdev_ops - initialization of net_device operations 2245 * @netdev: net_device structure 2246 * 2247 * fills out function pointers in the net_device structure 2248 */ 2249 static void 2250 spider_net_setup_netdev_ops(struct net_device *netdev) 2251 { 2252 netdev->netdev_ops = &spider_net_ops; 2253 netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT; 2254 /* ethtool ops */ 2255 netdev->ethtool_ops = &spider_net_ethtool_ops; 2256 } 2257 2258 /** 2259 * spider_net_setup_netdev - initialization of net_device 2260 * @card: card structure 2261 * 2262 * Returns 0 on success or <0 on failure 2263 * 2264 * spider_net_setup_netdev initializes the net_device structure 2265 **/ 2266 static int 2267 spider_net_setup_netdev(struct spider_net_card *card) 2268 { 2269 int result; 2270 struct net_device *netdev = card->netdev; 2271 struct device_node *dn; 2272 struct sockaddr addr; 2273 const u8 *mac; 2274 2275 SET_NETDEV_DEV(netdev, &card->pdev->dev); 2276 2277 pci_set_drvdata(card->pdev, netdev); 2278 2279 init_timer(&card->tx_timer); 2280 card->tx_timer.function = 2281 (void (*)(unsigned long)) spider_net_cleanup_tx_ring; 2282 card->tx_timer.data = (unsigned long) card; 2283 netdev->irq = card->pdev->irq; 2284 2285 card->aneg_count = 0; 2286 init_timer(&card->aneg_timer); 2287 card->aneg_timer.function = spider_net_link_phy; 2288 card->aneg_timer.data = (unsigned long) card; 2289 2290 netif_napi_add(netdev, &card->napi, 2291 spider_net_poll, SPIDER_NET_NAPI_WEIGHT); 2292 2293 spider_net_setup_netdev_ops(netdev); 2294 2295 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM; 2296 if (SPIDER_NET_RX_CSUM_DEFAULT) 2297 netdev->features |= NETIF_F_RXCSUM; 2298 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX; 2299 /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 2300 * NETIF_F_HW_VLAN_CTAG_FILTER */ 2301 2302 netdev->irq = card->pdev->irq; 2303 card->num_rx_ints = 0; 2304 card->ignore_rx_ramfull = 0; 2305 2306 dn = pci_device_to_OF_node(card->pdev); 2307 if (!dn) 2308 return -EIO; 2309 2310 mac = of_get_property(dn, "local-mac-address", NULL); 2311 if (!mac) 2312 return -EIO; 2313 memcpy(addr.sa_data, mac, ETH_ALEN); 2314 2315 result = spider_net_set_mac(netdev, &addr); 2316 if ((result) && (netif_msg_probe(card))) 2317 dev_err(&card->netdev->dev, 2318 "Failed to set MAC address: %i\n", result); 2319 2320 result = register_netdev(netdev); 2321 if (result) { 2322 if (netif_msg_probe(card)) 2323 
dev_err(&card->netdev->dev,
2324 			"Couldn't register net_device: %i\n", result);
2325 		return result;
2326 	}
2327 
2328 	if (netif_msg_probe(card))
2329 		pr_info("Initialized device %s.\n", netdev->name);
2330 
2331 	return 0;
2332 }
2333 
2334 /**
2335  * spider_net_alloc_card - allocates net_device and card structure
2336  *
2337  * returns the card structure or NULL in case of errors
2338  *
2339  * the card and net_device structures are linked to each other
2340  */
2341 static struct spider_net_card *
2342 spider_net_alloc_card(void)
2343 {
2344 	struct net_device *netdev;
2345 	struct spider_net_card *card;
2346 	size_t alloc_size;
2347 
2348 	alloc_size = sizeof(struct spider_net_card) +
2349 		(tx_descriptors + rx_descriptors) * sizeof(struct spider_net_descr);
2350 	netdev = alloc_etherdev(alloc_size);
2351 	if (!netdev)
2352 		return NULL;
2353 
2354 	card = netdev_priv(netdev);
2355 	card->netdev = netdev;
2356 	card->msg_enable = SPIDER_NET_DEFAULT_MSG;
2357 	INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
2358 	init_waitqueue_head(&card->waitq);
2359 	atomic_set(&card->tx_timeout_task_counter, 0);
2360 
2361 	card->rx_chain.num_desc = rx_descriptors;
2362 	card->rx_chain.ring = card->darray;
2363 	card->tx_chain.num_desc = tx_descriptors;
2364 	card->tx_chain.ring = card->darray + rx_descriptors;
2365 
2366 	return card;
2367 }
2368 
2369 /**
2370  * spider_net_undo_pci_setup - releases PCI resources
2371  * @card: card structure
2372  *
2373  * spider_net_undo_pci_setup releases the mapped regions
2374  */
2375 static void
2376 spider_net_undo_pci_setup(struct spider_net_card *card)
2377 {
2378 	iounmap(card->regs);
2379 	pci_release_regions(card->pdev);
2380 }
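/*
 * Memory layout note (follows from spider_net_alloc_card() above): the
 * net_device private area is allocated as one block,
 *
 *	[ struct spider_net_card | rx_descriptors descrs | tx_descriptors descrs ]
 *	                           ^ card->darray           ^ card->darray + rx_descriptors
 *
 * so card->rx_chain.ring and card->tx_chain.ring are simply pointers into
 * the trailing card->darray part of that single allocation.
 */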
2381 
2382 /**
2383  * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
2384  * @pdev: PCI device
2385  *
2386  * Returns the card structure or NULL if any errors occur
2387  *
2388  * spider_net_setup_pci_dev initializes pdev and, together with the
2389  * functions called in spider_net_open, configures the device so that
2390  * data can be transferred over it.
2391  * The net_device structure is attached to the card structure if the
2392  * function returns without error.
2393  **/
2394 static struct spider_net_card *
2395 spider_net_setup_pci_dev(struct pci_dev *pdev)
2396 {
2397 	struct spider_net_card *card;
2398 	unsigned long mmio_start, mmio_len;
2399 
2400 	if (pci_enable_device(pdev)) {
2401 		dev_err(&pdev->dev, "Couldn't enable PCI device\n");
2402 		return NULL;
2403 	}
2404 
2405 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2406 		dev_err(&pdev->dev,
2407 			"Couldn't find proper PCI device base address.\n");
2408 		goto out_disable_dev;
2409 	}
2410 
2411 	if (pci_request_regions(pdev, spider_net_driver_name)) {
2412 		dev_err(&pdev->dev,
2413 			"Couldn't obtain PCI resources, aborting.\n");
2414 		goto out_disable_dev;
2415 	}
2416 
2417 	pci_set_master(pdev);
2418 
2419 	card = spider_net_alloc_card();
2420 	if (!card) {
2421 		dev_err(&pdev->dev,
2422 			"Couldn't allocate net_device structure, aborting.\n");
2423 		goto out_release_regions;
2424 	}
2425 	card->pdev = pdev;
2426 
2427 	/* fetch base address and length of first resource */
2428 	mmio_start = pci_resource_start(pdev, 0);
2429 	mmio_len = pci_resource_len(pdev, 0);
2430 
2431 	card->netdev->mem_start = mmio_start;
2432 	card->netdev->mem_end = mmio_start + mmio_len;
2433 	card->regs = ioremap(mmio_start, mmio_len);
2434 
2435 	if (!card->regs) {
2436 		dev_err(&pdev->dev, "Couldn't ioremap PCI resources, aborting.\n");
2437 		free_netdev(card->netdev);
2438 		goto out_release_regions;
2439 	}
2440 
2441 	return card;
2442 
2443 out_release_regions:
2444 	pci_release_regions(pdev);
2445 out_disable_dev:
2446 	pci_disable_device(pdev);
2447 	return NULL;
2448 }
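/*
 * Side note (illustrative, not a change to the driver): the BAR 0 mapping
 * in spider_net_setup_pci_dev() could also be written with the generic
 * helper that looks up start and length itself, e.g.
 *
 *	card->regs = pci_iomap(pdev, 0, 0);
 *
 * The explicit pci_resource_start()/pci_resource_len() calls also provide
 * the values recorded in netdev->mem_start and netdev->mem_end.
 */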
2449 
2450 /**
2451  * spider_net_probe - initialization of a device
2452  * @pdev: PCI device
2453  * @ent: entry in the device id list
2454  *
2455  * Returns 0 on success, <0 on failure
2456  *
2457  * spider_net_probe initializes pdev and registers a net_device
2458  * structure for it. After that, the device can be ifconfig'ed up
2459  **/
2460 static int
2461 spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2462 {
2463 	int err = -EIO;
2464 	struct spider_net_card *card;
2465 
2466 	card = spider_net_setup_pci_dev(pdev);
2467 	if (!card)
2468 		goto out;
2469 
2470 	spider_net_workaround_rxramfull(card);
2471 	spider_net_init_card(card);
2472 
2473 	err = spider_net_setup_phy(card);
2474 	if (err)
2475 		goto out_undo_pci;
2476 
2477 	err = spider_net_setup_netdev(card);
2478 	if (err)
2479 		goto out_undo_pci;
2480 
2481 	return 0;
2482 
2483 out_undo_pci:
2484 	spider_net_undo_pci_setup(card);
2485 	free_netdev(card->netdev);
2486 out:
2487 	return err;
2488 }
2489 
2490 /**
2491  * spider_net_remove - removal of a device
2492  * @pdev: PCI device
2493  *
2494  * No return value.
2495  *
2496  * spider_net_remove is called to remove the device and unregister the
2497  * net_device
2498  **/
2499 static void
2500 spider_net_remove(struct pci_dev *pdev)
2501 {
2502 	struct net_device *netdev;
2503 	struct spider_net_card *card;
2504 
2505 	netdev = pci_get_drvdata(pdev);
2506 	card = netdev_priv(netdev);
2507 
2508 	wait_event(card->waitq,
2509 		   atomic_read(&card->tx_timeout_task_counter) == 0);
2510 
2511 	unregister_netdev(netdev);
2512 
2513 	/* switch off card */
2514 	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2515 			     SPIDER_NET_CKRCTRL_STOP_VALUE);
2516 	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2517 			     SPIDER_NET_CKRCTRL_RUN_VALUE);
2518 
2519 	spider_net_undo_pci_setup(card);
2520 	free_netdev(netdev);
2521 }
2522 
2523 static struct pci_driver spider_net_driver = {
2524 	.name = spider_net_driver_name,
2525 	.id_table = spider_net_pci_tbl,
2526 	.probe = spider_net_probe,
2527 	.remove = spider_net_remove
2528 };
2529 
2530 /**
2531  * spider_net_init - init function when the driver is loaded
2532  *
2533  * spider_net_init registers the device driver
2534  */
2535 static int __init spider_net_init(void)
2536 {
2537 	printk(KERN_INFO "Spidernet version %s.\n", VERSION);
2538 
2539 	if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
2540 		rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
2541 		pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2542 	}
2543 	if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
2544 		rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
2545 		pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2546 	}
2547 	if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
2548 		tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
2549 		pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2550 	}
2551 	if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
2552 		tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
2553 		pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2554 	}
2555 
2556 	return pci_register_driver(&spider_net_driver);
2557 }
2558 
2559 /**
2560  * spider_net_cleanup - exit function when driver is unloaded
2561  *
2562  * spider_net_cleanup unregisters the device driver
2563  */
2564 static void __exit spider_net_cleanup(void)
2565 {
2566 	pci_unregister_driver(&spider_net_driver);
2567 }
2568 
2569 module_init(spider_net_init);
2570 module_exit(spider_net_cleanup);
2571 
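/*
 * Usage example (illustrative; assumes the module is built as spider_net.ko):
 * the ring sizes checked in spider_net_init() are module parameters and can
 * be set at load time with example values such as
 *
 *	modprobe spider_net rx_descriptors=256 tx_descriptors=64
 *
 * Values outside the SPIDER_NET_*_DESCRIPTORS_MIN/MAX bounds are clamped,
 * with a pr_info() notice, before the PCI driver is registered.
 */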