1 /* 2 * Copyright (C) 2006-2007 PA Semi, Inc 3 * 4 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 20 #include <linux/init.h> 21 #include <linux/module.h> 22 #include <linux/pci.h> 23 #include <linux/slab.h> 24 #include <linux/interrupt.h> 25 #include <linux/dmaengine.h> 26 #include <linux/delay.h> 27 #include <linux/netdevice.h> 28 #include <linux/of_mdio.h> 29 #include <linux/etherdevice.h> 30 #include <asm/dma-mapping.h> 31 #include <linux/in.h> 32 #include <linux/skbuff.h> 33 34 #include <linux/ip.h> 35 #include <linux/tcp.h> 36 #include <net/checksum.h> 37 #include <linux/inet_lro.h> 38 #include <linux/prefetch.h> 39 40 #include <asm/irq.h> 41 #include <asm/firmware.h> 42 #include <asm/pasemi_dma.h> 43 44 #include "pasemi_mac.h" 45 46 /* We have our own align, since ppc64 in general has it at 0 because 47 * of design flaws in some of the server bridge chips. However, for 48 * PWRficient doing the unaligned copies is more expensive than doing 49 * unaligned DMA, so make sure the data is aligned instead. 50 */ 51 #define LOCAL_SKB_ALIGN 2 52 53 /* TODO list 54 * 55 * - Multicast support 56 * - Large MTU support 57 * - SW LRO 58 * - Multiqueue RX/TX 59 */ 60 61 #define LRO_MAX_AGGR 64 62 63 #define PE_MIN_MTU 64 64 #define PE_MAX_MTU 9000 65 #define PE_DEF_MTU ETH_DATA_LEN 66 67 #define DEFAULT_MSG_ENABLE \ 68 (NETIF_MSG_DRV | \ 69 NETIF_MSG_PROBE | \ 70 NETIF_MSG_LINK | \ 71 NETIF_MSG_TIMER | \ 72 NETIF_MSG_IFDOWN | \ 73 NETIF_MSG_IFUP | \ 74 NETIF_MSG_RX_ERR | \ 75 NETIF_MSG_TX_ERR) 76 77 MODULE_LICENSE("GPL"); 78 MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>"); 79 MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver"); 80 81 static int debug = -1; /* -1 == use DEFAULT_MSG_ENABLE as value */ 82 module_param(debug, int, 0); 83 MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value"); 84 85 extern const struct ethtool_ops pasemi_mac_ethtool_ops; 86 87 static int translation_enabled(void) 88 { 89 #if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE) 90 return 1; 91 #else 92 return firmware_has_feature(FW_FEATURE_LPAR); 93 #endif 94 } 95 96 static void write_iob_reg(unsigned int reg, unsigned int val) 97 { 98 pasemi_write_iob_reg(reg, val); 99 } 100 101 static unsigned int read_mac_reg(const struct pasemi_mac *mac, unsigned int reg) 102 { 103 return pasemi_read_mac_reg(mac->dma_if, reg); 104 } 105 106 static void write_mac_reg(const struct pasemi_mac *mac, unsigned int reg, 107 unsigned int val) 108 { 109 pasemi_write_mac_reg(mac->dma_if, reg, val); 110 } 111 112 static unsigned int read_dma_reg(unsigned int reg) 113 { 114 return pasemi_read_dma_reg(reg); 115 } 116 117 static void write_dma_reg(unsigned int reg, unsigned int val) 118 { 119 pasemi_write_dma_reg(reg, val); 120 } 121 122 static struct pasemi_mac_rxring *rx_ring(const struct pasemi_mac *mac) 123 { 124 return mac->rx; 125 } 126 127 static struct pasemi_mac_txring *tx_ring(const struct pasemi_mac *mac) 128 { 129 return mac->tx; 130 } 131 132 static inline void prefetch_skb(const struct sk_buff *skb) 133 { 134 const void *d = skb; 135 136 prefetch(d); 137 prefetch(d+64); 138 prefetch(d+128); 139 prefetch(d+192); 140 } 141 142 static int mac_to_intf(struct pasemi_mac *mac) 143 { 144 struct pci_dev *pdev = mac->pdev; 145 u32 tmp; 146 int nintf, off, i, j; 147 int devfn = pdev->devfn; 148 149 tmp = read_dma_reg(PAS_DMA_CAP_IFI); 150 nintf = (tmp & PAS_DMA_CAP_IFI_NIN_M) >> PAS_DMA_CAP_IFI_NIN_S; 151 off = (tmp & PAS_DMA_CAP_IFI_IOFF_M) >> PAS_DMA_CAP_IFI_IOFF_S; 152 153 /* IOFF contains the offset to the registers containing the 154 * DMA interface-to-MAC-pci-id mappings, and NIN contains number 155 * of total interfaces. Each register contains 4 devfns. 156 * Just do a linear search until we find the devfn of the MAC 157 * we're trying to look up. 158 */ 159 160 for (i = 0; i < (nintf+3)/4; i++) { 161 tmp = read_dma_reg(off+4*i); 162 for (j = 0; j < 4; j++) { 163 if (((tmp >> (8*j)) & 0xff) == devfn) 164 return i*4 + j; 165 } 166 } 167 return -1; 168 } 169 170 static void pasemi_mac_intf_disable(struct pasemi_mac *mac) 171 { 172 unsigned int flags; 173 174 flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); 175 flags &= ~PAS_MAC_CFG_PCFG_PE; 176 write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); 177 } 178 179 static void pasemi_mac_intf_enable(struct pasemi_mac *mac) 180 { 181 unsigned int flags; 182 183 flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); 184 flags |= PAS_MAC_CFG_PCFG_PE; 185 write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); 186 } 187 188 static int pasemi_get_mac_addr(struct pasemi_mac *mac) 189 { 190 struct pci_dev *pdev = mac->pdev; 191 struct device_node *dn = pci_device_to_OF_node(pdev); 192 int len; 193 const u8 *maddr; 194 u8 addr[6]; 195 196 if (!dn) { 197 dev_dbg(&pdev->dev, 198 "No device node for mac, not configuring\n"); 199 return -ENOENT; 200 } 201 202 maddr = of_get_property(dn, "local-mac-address", &len); 203 204 if (maddr && len == 6) { 205 memcpy(mac->mac_addr, maddr, 6); 206 return 0; 207 } 208 209 /* Some old versions of firmware mistakenly uses mac-address 210 * (and as a string) instead of a byte array in local-mac-address. 211 */ 212 213 if (maddr == NULL) 214 maddr = of_get_property(dn, "mac-address", NULL); 215 216 if (maddr == NULL) { 217 dev_warn(&pdev->dev, 218 "no mac address in device tree, not configuring\n"); 219 return -ENOENT; 220 } 221 222 if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0], 223 &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) { 224 dev_warn(&pdev->dev, 225 "can't parse mac address, not configuring\n"); 226 return -EINVAL; 227 } 228 229 memcpy(mac->mac_addr, addr, 6); 230 231 return 0; 232 } 233 234 static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p) 235 { 236 struct pasemi_mac *mac = netdev_priv(dev); 237 struct sockaddr *addr = p; 238 unsigned int adr0, adr1; 239 240 if (!is_valid_ether_addr(addr->sa_data)) 241 return -EADDRNOTAVAIL; 242 243 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 244 245 adr0 = dev->dev_addr[2] << 24 | 246 dev->dev_addr[3] << 16 | 247 dev->dev_addr[4] << 8 | 248 dev->dev_addr[5]; 249 adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1); 250 adr1 &= ~0xffff; 251 adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1]; 252 253 pasemi_mac_intf_disable(mac); 254 write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0); 255 write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1); 256 pasemi_mac_intf_enable(mac); 257 258 return 0; 259 } 260 261 static int get_skb_hdr(struct sk_buff *skb, void **iphdr, 262 void **tcph, u64 *hdr_flags, void *data) 263 { 264 u64 macrx = (u64) data; 265 unsigned int ip_len; 266 struct iphdr *iph; 267 268 /* IPv4 header checksum failed */ 269 if ((macrx & XCT_MACRX_HTY_M) != XCT_MACRX_HTY_IPV4_OK) 270 return -1; 271 272 /* non tcp packet */ 273 skb_reset_network_header(skb); 274 iph = ip_hdr(skb); 275 if (iph->protocol != IPPROTO_TCP) 276 return -1; 277 278 ip_len = ip_hdrlen(skb); 279 skb_set_transport_header(skb, ip_len); 280 *tcph = tcp_hdr(skb); 281 282 /* check if ip header and tcp header are complete */ 283 if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)) 284 return -1; 285 286 *hdr_flags = LRO_IPV4 | LRO_TCP; 287 *iphdr = iph; 288 289 return 0; 290 } 291 292 static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac, 293 const int nfrags, 294 struct sk_buff *skb, 295 const dma_addr_t *dmas) 296 { 297 int f; 298 struct pci_dev *pdev = mac->dma_pdev; 299 300 pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE); 301 302 for (f = 0; f < nfrags; f++) { 303 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 304 305 pci_unmap_page(pdev, dmas[f+1], skb_frag_size(frag), PCI_DMA_TODEVICE); 306 } 307 dev_kfree_skb_irq(skb); 308 309 /* Freed descriptor slot + main SKB ptr + nfrags additional ptrs, 310 * aligned up to a power of 2 311 */ 312 return (nfrags + 3) & ~1; 313 } 314 315 static struct pasemi_mac_csring *pasemi_mac_setup_csring(struct pasemi_mac *mac) 316 { 317 struct pasemi_mac_csring *ring; 318 u32 val; 319 unsigned int cfg; 320 int chno; 321 322 ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring), 323 offsetof(struct pasemi_mac_csring, chan)); 324 325 if (!ring) { 326 dev_err(&mac->pdev->dev, "Can't allocate checksum channel\n"); 327 goto out_chan; 328 } 329 330 chno = ring->chan.chno; 331 332 ring->size = CS_RING_SIZE; 333 ring->next_to_fill = 0; 334 335 /* Allocate descriptors */ 336 if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE)) 337 goto out_ring_desc; 338 339 write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno), 340 PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma)); 341 val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32); 342 val |= PAS_DMA_TXCHAN_BASEU_SIZ(CS_RING_SIZE >> 3); 343 344 write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val); 345 346 ring->events[0] = pasemi_dma_alloc_flag(); 347 ring->events[1] = pasemi_dma_alloc_flag(); 348 if (ring->events[0] < 0 || ring->events[1] < 0) 349 goto out_flags; 350 351 pasemi_dma_clear_flag(ring->events[0]); 352 pasemi_dma_clear_flag(ring->events[1]); 353 354 ring->fun = pasemi_dma_alloc_fun(); 355 if (ring->fun < 0) 356 goto out_fun; 357 358 cfg = PAS_DMA_TXCHAN_CFG_TY_FUNC | PAS_DMA_TXCHAN_CFG_UP | 359 PAS_DMA_TXCHAN_CFG_TATTR(ring->fun) | 360 PAS_DMA_TXCHAN_CFG_LPSQ | PAS_DMA_TXCHAN_CFG_LPDQ; 361 362 if (translation_enabled()) 363 cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR; 364 365 write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg); 366 367 /* enable channel */ 368 pasemi_dma_start_chan(&ring->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ | 369 PAS_DMA_TXCHAN_TCMDSTA_DB | 370 PAS_DMA_TXCHAN_TCMDSTA_DE | 371 PAS_DMA_TXCHAN_TCMDSTA_DA); 372 373 return ring; 374 375 out_fun: 376 out_flags: 377 if (ring->events[0] >= 0) 378 pasemi_dma_free_flag(ring->events[0]); 379 if (ring->events[1] >= 0) 380 pasemi_dma_free_flag(ring->events[1]); 381 pasemi_dma_free_ring(&ring->chan); 382 out_ring_desc: 383 pasemi_dma_free_chan(&ring->chan); 384 out_chan: 385 386 return NULL; 387 } 388 389 static void pasemi_mac_setup_csrings(struct pasemi_mac *mac) 390 { 391 int i; 392 mac->cs[0] = pasemi_mac_setup_csring(mac); 393 if (mac->type == MAC_TYPE_XAUI) 394 mac->cs[1] = pasemi_mac_setup_csring(mac); 395 else 396 mac->cs[1] = 0; 397 398 for (i = 0; i < MAX_CS; i++) 399 if (mac->cs[i]) 400 mac->num_cs++; 401 } 402 403 static void pasemi_mac_free_csring(struct pasemi_mac_csring *csring) 404 { 405 pasemi_dma_stop_chan(&csring->chan); 406 pasemi_dma_free_flag(csring->events[0]); 407 pasemi_dma_free_flag(csring->events[1]); 408 pasemi_dma_free_ring(&csring->chan); 409 pasemi_dma_free_chan(&csring->chan); 410 pasemi_dma_free_fun(csring->fun); 411 } 412 413 static int pasemi_mac_setup_rx_resources(const struct net_device *dev) 414 { 415 struct pasemi_mac_rxring *ring; 416 struct pasemi_mac *mac = netdev_priv(dev); 417 int chno; 418 unsigned int cfg; 419 420 ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring), 421 offsetof(struct pasemi_mac_rxring, chan)); 422 423 if (!ring) { 424 dev_err(&mac->pdev->dev, "Can't allocate RX channel\n"); 425 goto out_chan; 426 } 427 chno = ring->chan.chno; 428 429 spin_lock_init(&ring->lock); 430 431 ring->size = RX_RING_SIZE; 432 ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) * 433 RX_RING_SIZE, GFP_KERNEL); 434 435 if (!ring->ring_info) 436 goto out_ring_info; 437 438 /* Allocate descriptors */ 439 if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) 440 goto out_ring_desc; 441 442 ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, 443 RX_RING_SIZE * sizeof(u64), 444 &ring->buf_dma, GFP_KERNEL); 445 if (!ring->buffers) 446 goto out_ring_desc; 447 448 memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64)); 449 450 write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno), 451 PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma)); 452 453 write_dma_reg(PAS_DMA_RXCHAN_BASEU(chno), 454 PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) | 455 PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3)); 456 457 cfg = PAS_DMA_RXCHAN_CFG_HBU(2); 458 459 if (translation_enabled()) 460 cfg |= PAS_DMA_RXCHAN_CFG_CTR; 461 462 write_dma_reg(PAS_DMA_RXCHAN_CFG(chno), cfg); 463 464 write_dma_reg(PAS_DMA_RXINT_BASEL(mac->dma_if), 465 PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma)); 466 467 write_dma_reg(PAS_DMA_RXINT_BASEU(mac->dma_if), 468 PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) | 469 PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3)); 470 471 cfg = PAS_DMA_RXINT_CFG_DHL(2) | PAS_DMA_RXINT_CFG_L2 | 472 PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP | 473 PAS_DMA_RXINT_CFG_HEN; 474 475 if (translation_enabled()) 476 cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR; 477 478 write_dma_reg(PAS_DMA_RXINT_CFG(mac->dma_if), cfg); 479 480 ring->next_to_fill = 0; 481 ring->next_to_clean = 0; 482 ring->mac = mac; 483 mac->rx = ring; 484 485 return 0; 486 487 out_ring_desc: 488 kfree(ring->ring_info); 489 out_ring_info: 490 pasemi_dma_free_chan(&ring->chan); 491 out_chan: 492 return -ENOMEM; 493 } 494 495 static struct pasemi_mac_txring * 496 pasemi_mac_setup_tx_resources(const struct net_device *dev) 497 { 498 struct pasemi_mac *mac = netdev_priv(dev); 499 u32 val; 500 struct pasemi_mac_txring *ring; 501 unsigned int cfg; 502 int chno; 503 504 ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring), 505 offsetof(struct pasemi_mac_txring, chan)); 506 507 if (!ring) { 508 dev_err(&mac->pdev->dev, "Can't allocate TX channel\n"); 509 goto out_chan; 510 } 511 512 chno = ring->chan.chno; 513 514 spin_lock_init(&ring->lock); 515 516 ring->size = TX_RING_SIZE; 517 ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) * 518 TX_RING_SIZE, GFP_KERNEL); 519 if (!ring->ring_info) 520 goto out_ring_info; 521 522 /* Allocate descriptors */ 523 if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE)) 524 goto out_ring_desc; 525 526 write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno), 527 PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma)); 528 val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32); 529 val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3); 530 531 write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val); 532 533 cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE | 534 PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) | 535 PAS_DMA_TXCHAN_CFG_UP | 536 PAS_DMA_TXCHAN_CFG_WT(4); 537 538 if (translation_enabled()) 539 cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR; 540 541 write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg); 542 543 ring->next_to_fill = 0; 544 ring->next_to_clean = 0; 545 ring->mac = mac; 546 547 return ring; 548 549 out_ring_desc: 550 kfree(ring->ring_info); 551 out_ring_info: 552 pasemi_dma_free_chan(&ring->chan); 553 out_chan: 554 return NULL; 555 } 556 557 static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac) 558 { 559 struct pasemi_mac_txring *txring = tx_ring(mac); 560 unsigned int i, j; 561 struct pasemi_mac_buffer *info; 562 dma_addr_t dmas[MAX_SKB_FRAGS+1]; 563 int freed, nfrags; 564 int start, limit; 565 566 start = txring->next_to_clean; 567 limit = txring->next_to_fill; 568 569 /* Compensate for when fill has wrapped and clean has not */ 570 if (start > limit) 571 limit += TX_RING_SIZE; 572 573 for (i = start; i < limit; i += freed) { 574 info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)]; 575 if (info->dma && info->skb) { 576 nfrags = skb_shinfo(info->skb)->nr_frags; 577 for (j = 0; j <= nfrags; j++) 578 dmas[j] = txring->ring_info[(i+1+j) & 579 (TX_RING_SIZE-1)].dma; 580 freed = pasemi_mac_unmap_tx_skb(mac, nfrags, 581 info->skb, dmas); 582 } else { 583 freed = 2; 584 } 585 } 586 587 kfree(txring->ring_info); 588 pasemi_dma_free_chan(&txring->chan); 589 590 } 591 592 static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac) 593 { 594 struct pasemi_mac_rxring *rx = rx_ring(mac); 595 unsigned int i; 596 struct pasemi_mac_buffer *info; 597 598 for (i = 0; i < RX_RING_SIZE; i++) { 599 info = &RX_DESC_INFO(rx, i); 600 if (info->skb && info->dma) { 601 pci_unmap_single(mac->dma_pdev, 602 info->dma, 603 info->skb->len, 604 PCI_DMA_FROMDEVICE); 605 dev_kfree_skb_any(info->skb); 606 } 607 info->dma = 0; 608 info->skb = NULL; 609 } 610 611 for (i = 0; i < RX_RING_SIZE; i++) 612 RX_BUFF(rx, i) = 0; 613 } 614 615 static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac) 616 { 617 pasemi_mac_free_rx_buffers(mac); 618 619 dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), 620 rx_ring(mac)->buffers, rx_ring(mac)->buf_dma); 621 622 kfree(rx_ring(mac)->ring_info); 623 pasemi_dma_free_chan(&rx_ring(mac)->chan); 624 mac->rx = NULL; 625 } 626 627 static void pasemi_mac_replenish_rx_ring(struct net_device *dev, 628 const int limit) 629 { 630 const struct pasemi_mac *mac = netdev_priv(dev); 631 struct pasemi_mac_rxring *rx = rx_ring(mac); 632 int fill, count; 633 634 if (limit <= 0) 635 return; 636 637 fill = rx_ring(mac)->next_to_fill; 638 for (count = 0; count < limit; count++) { 639 struct pasemi_mac_buffer *info = &RX_DESC_INFO(rx, fill); 640 u64 *buff = &RX_BUFF(rx, fill); 641 struct sk_buff *skb; 642 dma_addr_t dma; 643 644 /* Entry in use? */ 645 WARN_ON(*buff); 646 647 skb = netdev_alloc_skb(dev, mac->bufsz); 648 skb_reserve(skb, LOCAL_SKB_ALIGN); 649 650 if (unlikely(!skb)) 651 break; 652 653 dma = pci_map_single(mac->dma_pdev, skb->data, 654 mac->bufsz - LOCAL_SKB_ALIGN, 655 PCI_DMA_FROMDEVICE); 656 657 if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) { 658 dev_kfree_skb_irq(info->skb); 659 break; 660 } 661 662 info->skb = skb; 663 info->dma = dma; 664 *buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma); 665 fill++; 666 } 667 668 wmb(); 669 670 write_dma_reg(PAS_DMA_RXINT_INCR(mac->dma_if), count); 671 672 rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) & 673 (RX_RING_SIZE - 1); 674 } 675 676 static void pasemi_mac_restart_rx_intr(const struct pasemi_mac *mac) 677 { 678 struct pasemi_mac_rxring *rx = rx_ring(mac); 679 unsigned int reg, pcnt; 680 /* Re-enable packet count interrupts: finally 681 * ack the packet count interrupt we got in rx_intr. 682 */ 683 684 pcnt = *rx->chan.status & PAS_STATUS_PCNT_M; 685 686 reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC; 687 688 if (*rx->chan.status & PAS_STATUS_TIMER) 689 reg |= PAS_IOB_DMA_RXCH_RESET_TINTC; 690 691 write_iob_reg(PAS_IOB_DMA_RXCH_RESET(mac->rx->chan.chno), reg); 692 } 693 694 static void pasemi_mac_restart_tx_intr(const struct pasemi_mac *mac) 695 { 696 unsigned int reg, pcnt; 697 698 /* Re-enable packet count interrupts */ 699 pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M; 700 701 reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC; 702 703 write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg); 704 } 705 706 707 static inline void pasemi_mac_rx_error(const struct pasemi_mac *mac, 708 const u64 macrx) 709 { 710 unsigned int rcmdsta, ccmdsta; 711 struct pasemi_dmachan *chan = &rx_ring(mac)->chan; 712 713 if (!netif_msg_rx_err(mac)) 714 return; 715 716 rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); 717 ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno)); 718 719 printk(KERN_ERR "pasemi_mac: rx error. macrx %016llx, rx status %llx\n", 720 macrx, *chan->status); 721 722 printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n", 723 rcmdsta, ccmdsta); 724 } 725 726 static inline void pasemi_mac_tx_error(const struct pasemi_mac *mac, 727 const u64 mactx) 728 { 729 unsigned int cmdsta; 730 struct pasemi_dmachan *chan = &tx_ring(mac)->chan; 731 732 if (!netif_msg_tx_err(mac)) 733 return; 734 735 cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno)); 736 737 printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016llx, "\ 738 "tx status 0x%016llx\n", mactx, *chan->status); 739 740 printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta); 741 } 742 743 static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, 744 const int limit) 745 { 746 const struct pasemi_dmachan *chan = &rx->chan; 747 struct pasemi_mac *mac = rx->mac; 748 struct pci_dev *pdev = mac->dma_pdev; 749 unsigned int n; 750 int count, buf_index, tot_bytes, packets; 751 struct pasemi_mac_buffer *info; 752 struct sk_buff *skb; 753 unsigned int len; 754 u64 macrx, eval; 755 dma_addr_t dma; 756 757 tot_bytes = 0; 758 packets = 0; 759 760 spin_lock(&rx->lock); 761 762 n = rx->next_to_clean; 763 764 prefetch(&RX_DESC(rx, n)); 765 766 for (count = 0; count < limit; count++) { 767 macrx = RX_DESC(rx, n); 768 prefetch(&RX_DESC(rx, n+4)); 769 770 if ((macrx & XCT_MACRX_E) || 771 (*chan->status & PAS_STATUS_ERROR)) 772 pasemi_mac_rx_error(mac, macrx); 773 774 if (!(macrx & XCT_MACRX_O)) 775 break; 776 777 info = NULL; 778 779 BUG_ON(!(macrx & XCT_MACRX_RR_8BRES)); 780 781 eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >> 782 XCT_RXRES_8B_EVAL_S; 783 buf_index = eval-1; 784 785 dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M); 786 info = &RX_DESC_INFO(rx, buf_index); 787 788 skb = info->skb; 789 790 prefetch_skb(skb); 791 792 len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S; 793 794 pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN, 795 PCI_DMA_FROMDEVICE); 796 797 if (macrx & XCT_MACRX_CRC) { 798 /* CRC error flagged */ 799 mac->netdev->stats.rx_errors++; 800 mac->netdev->stats.rx_crc_errors++; 801 /* No need to free skb, it'll be reused */ 802 goto next; 803 } 804 805 info->skb = NULL; 806 info->dma = 0; 807 808 if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) { 809 skb->ip_summed = CHECKSUM_UNNECESSARY; 810 skb->csum = (macrx & XCT_MACRX_CSUM_M) >> 811 XCT_MACRX_CSUM_S; 812 } else { 813 skb_checksum_none_assert(skb); 814 } 815 816 packets++; 817 tot_bytes += len; 818 819 /* Don't include CRC */ 820 skb_put(skb, len-4); 821 822 skb->protocol = eth_type_trans(skb, mac->netdev); 823 lro_receive_skb(&mac->lro_mgr, skb, (void *)macrx); 824 825 next: 826 RX_DESC(rx, n) = 0; 827 RX_DESC(rx, n+1) = 0; 828 829 /* Need to zero it out since hardware doesn't, since the 830 * replenish loop uses it to tell when it's done. 831 */ 832 RX_BUFF(rx, buf_index) = 0; 833 834 n += 4; 835 } 836 837 if (n > RX_RING_SIZE) { 838 /* Errata 5971 workaround: L2 target of headers */ 839 write_iob_reg(PAS_IOB_COM_PKTHDRCNT, 0); 840 n &= (RX_RING_SIZE-1); 841 } 842 843 rx_ring(mac)->next_to_clean = n; 844 845 lro_flush_all(&mac->lro_mgr); 846 847 /* Increase is in number of 16-byte entries, and since each descriptor 848 * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with 849 * count*2. 850 */ 851 write_dma_reg(PAS_DMA_RXCHAN_INCR(mac->rx->chan.chno), count << 1); 852 853 pasemi_mac_replenish_rx_ring(mac->netdev, count); 854 855 mac->netdev->stats.rx_bytes += tot_bytes; 856 mac->netdev->stats.rx_packets += packets; 857 858 spin_unlock(&rx_ring(mac)->lock); 859 860 return count; 861 } 862 863 /* Can't make this too large or we blow the kernel stack limits */ 864 #define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS) 865 866 static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring) 867 { 868 struct pasemi_dmachan *chan = &txring->chan; 869 struct pasemi_mac *mac = txring->mac; 870 int i, j; 871 unsigned int start, descr_count, buf_count, batch_limit; 872 unsigned int ring_limit; 873 unsigned int total_count; 874 unsigned long flags; 875 struct sk_buff *skbs[TX_CLEAN_BATCHSIZE]; 876 dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1]; 877 int nf[TX_CLEAN_BATCHSIZE]; 878 int nr_frags; 879 880 total_count = 0; 881 batch_limit = TX_CLEAN_BATCHSIZE; 882 restart: 883 spin_lock_irqsave(&txring->lock, flags); 884 885 start = txring->next_to_clean; 886 ring_limit = txring->next_to_fill; 887 888 prefetch(&TX_DESC_INFO(txring, start+1).skb); 889 890 /* Compensate for when fill has wrapped but clean has not */ 891 if (start > ring_limit) 892 ring_limit += TX_RING_SIZE; 893 894 buf_count = 0; 895 descr_count = 0; 896 897 for (i = start; 898 descr_count < batch_limit && i < ring_limit; 899 i += buf_count) { 900 u64 mactx = TX_DESC(txring, i); 901 struct sk_buff *skb; 902 903 if ((mactx & XCT_MACTX_E) || 904 (*chan->status & PAS_STATUS_ERROR)) 905 pasemi_mac_tx_error(mac, mactx); 906 907 /* Skip over control descriptors */ 908 if (!(mactx & XCT_MACTX_LLEN_M)) { 909 TX_DESC(txring, i) = 0; 910 TX_DESC(txring, i+1) = 0; 911 buf_count = 2; 912 continue; 913 } 914 915 skb = TX_DESC_INFO(txring, i+1).skb; 916 nr_frags = TX_DESC_INFO(txring, i).dma; 917 918 if (unlikely(mactx & XCT_MACTX_O)) 919 /* Not yet transmitted */ 920 break; 921 922 buf_count = 2 + nr_frags; 923 /* Since we always fill with an even number of entries, make 924 * sure we skip any unused one at the end as well. 925 */ 926 if (buf_count & 1) 927 buf_count++; 928 929 for (j = 0; j <= nr_frags; j++) 930 dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma; 931 932 skbs[descr_count] = skb; 933 nf[descr_count] = nr_frags; 934 935 TX_DESC(txring, i) = 0; 936 TX_DESC(txring, i+1) = 0; 937 938 descr_count++; 939 } 940 txring->next_to_clean = i & (TX_RING_SIZE-1); 941 942 spin_unlock_irqrestore(&txring->lock, flags); 943 netif_wake_queue(mac->netdev); 944 945 for (i = 0; i < descr_count; i++) 946 pasemi_mac_unmap_tx_skb(mac, nf[i], skbs[i], dmas[i]); 947 948 total_count += descr_count; 949 950 /* If the batch was full, try to clean more */ 951 if (descr_count == batch_limit) 952 goto restart; 953 954 return total_count; 955 } 956 957 958 static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) 959 { 960 const struct pasemi_mac_rxring *rxring = data; 961 struct pasemi_mac *mac = rxring->mac; 962 const struct pasemi_dmachan *chan = &rxring->chan; 963 unsigned int reg; 964 965 if (!(*chan->status & PAS_STATUS_CAUSE_M)) 966 return IRQ_NONE; 967 968 /* Don't reset packet count so it won't fire again but clear 969 * all others. 970 */ 971 972 reg = 0; 973 if (*chan->status & PAS_STATUS_SOFT) 974 reg |= PAS_IOB_DMA_RXCH_RESET_SINTC; 975 if (*chan->status & PAS_STATUS_ERROR) 976 reg |= PAS_IOB_DMA_RXCH_RESET_DINTC; 977 978 napi_schedule(&mac->napi); 979 980 write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg); 981 982 return IRQ_HANDLED; 983 } 984 985 #define TX_CLEAN_INTERVAL HZ 986 987 static void pasemi_mac_tx_timer(unsigned long data) 988 { 989 struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data; 990 struct pasemi_mac *mac = txring->mac; 991 992 pasemi_mac_clean_tx(txring); 993 994 mod_timer(&txring->clean_timer, jiffies + TX_CLEAN_INTERVAL); 995 996 pasemi_mac_restart_tx_intr(mac); 997 } 998 999 static irqreturn_t pasemi_mac_tx_intr(int irq, void *data) 1000 { 1001 struct pasemi_mac_txring *txring = data; 1002 const struct pasemi_dmachan *chan = &txring->chan; 1003 struct pasemi_mac *mac = txring->mac; 1004 unsigned int reg; 1005 1006 if (!(*chan->status & PAS_STATUS_CAUSE_M)) 1007 return IRQ_NONE; 1008 1009 reg = 0; 1010 1011 if (*chan->status & PAS_STATUS_SOFT) 1012 reg |= PAS_IOB_DMA_TXCH_RESET_SINTC; 1013 if (*chan->status & PAS_STATUS_ERROR) 1014 reg |= PAS_IOB_DMA_TXCH_RESET_DINTC; 1015 1016 mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2); 1017 1018 napi_schedule(&mac->napi); 1019 1020 if (reg) 1021 write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg); 1022 1023 return IRQ_HANDLED; 1024 } 1025 1026 static void pasemi_adjust_link(struct net_device *dev) 1027 { 1028 struct pasemi_mac *mac = netdev_priv(dev); 1029 int msg; 1030 unsigned int flags; 1031 unsigned int new_flags; 1032 1033 if (!mac->phydev->link) { 1034 /* If no link, MAC speed settings don't matter. Just report 1035 * link down and return. 1036 */ 1037 if (mac->link && netif_msg_link(mac)) 1038 printk(KERN_INFO "%s: Link is down.\n", dev->name); 1039 1040 netif_carrier_off(dev); 1041 pasemi_mac_intf_disable(mac); 1042 mac->link = 0; 1043 1044 return; 1045 } else { 1046 pasemi_mac_intf_enable(mac); 1047 netif_carrier_on(dev); 1048 } 1049 1050 flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); 1051 new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M | 1052 PAS_MAC_CFG_PCFG_TSR_M); 1053 1054 if (!mac->phydev->duplex) 1055 new_flags |= PAS_MAC_CFG_PCFG_HD; 1056 1057 switch (mac->phydev->speed) { 1058 case 1000: 1059 new_flags |= PAS_MAC_CFG_PCFG_SPD_1G | 1060 PAS_MAC_CFG_PCFG_TSR_1G; 1061 break; 1062 case 100: 1063 new_flags |= PAS_MAC_CFG_PCFG_SPD_100M | 1064 PAS_MAC_CFG_PCFG_TSR_100M; 1065 break; 1066 case 10: 1067 new_flags |= PAS_MAC_CFG_PCFG_SPD_10M | 1068 PAS_MAC_CFG_PCFG_TSR_10M; 1069 break; 1070 default: 1071 printk("Unsupported speed %d\n", mac->phydev->speed); 1072 } 1073 1074 /* Print on link or speed/duplex change */ 1075 msg = mac->link != mac->phydev->link || flags != new_flags; 1076 1077 mac->duplex = mac->phydev->duplex; 1078 mac->speed = mac->phydev->speed; 1079 mac->link = mac->phydev->link; 1080 1081 if (new_flags != flags) 1082 write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags); 1083 1084 if (msg && netif_msg_link(mac)) 1085 printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n", 1086 dev->name, mac->speed, mac->duplex ? "full" : "half"); 1087 } 1088 1089 static int pasemi_mac_phy_init(struct net_device *dev) 1090 { 1091 struct pasemi_mac *mac = netdev_priv(dev); 1092 struct device_node *dn, *phy_dn; 1093 struct phy_device *phydev; 1094 1095 dn = pci_device_to_OF_node(mac->pdev); 1096 phy_dn = of_parse_phandle(dn, "phy-handle", 0); 1097 of_node_put(phy_dn); 1098 1099 mac->link = 0; 1100 mac->speed = 0; 1101 mac->duplex = -1; 1102 1103 phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0, 1104 PHY_INTERFACE_MODE_SGMII); 1105 1106 if (!phydev) { 1107 printk(KERN_ERR "%s: Could not attach to phy\n", dev->name); 1108 return -ENODEV; 1109 } 1110 1111 mac->phydev = phydev; 1112 1113 return 0; 1114 } 1115 1116 1117 static int pasemi_mac_open(struct net_device *dev) 1118 { 1119 struct pasemi_mac *mac = netdev_priv(dev); 1120 unsigned int flags; 1121 int i, ret; 1122 1123 flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) | 1124 PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) | 1125 PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12); 1126 1127 write_mac_reg(mac, PAS_MAC_CFG_TXP, flags); 1128 1129 ret = pasemi_mac_setup_rx_resources(dev); 1130 if (ret) 1131 goto out_rx_resources; 1132 1133 mac->tx = pasemi_mac_setup_tx_resources(dev); 1134 1135 if (!mac->tx) 1136 goto out_tx_ring; 1137 1138 /* We might already have allocated rings in case mtu was changed 1139 * before interface was brought up. 1140 */ 1141 if (dev->mtu > 1500 && !mac->num_cs) { 1142 pasemi_mac_setup_csrings(mac); 1143 if (!mac->num_cs) 1144 goto out_tx_ring; 1145 } 1146 1147 /* Zero out rmon counters */ 1148 for (i = 0; i < 32; i++) 1149 write_mac_reg(mac, PAS_MAC_RMON(i), 0); 1150 1151 /* 0x3ff with 33MHz clock is about 31us */ 1152 write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG, 1153 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff)); 1154 1155 write_iob_reg(PAS_IOB_DMA_RXCH_CFG(mac->rx->chan.chno), 1156 PAS_IOB_DMA_RXCH_CFG_CNTTH(256)); 1157 1158 write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno), 1159 PAS_IOB_DMA_TXCH_CFG_CNTTH(32)); 1160 1161 write_mac_reg(mac, PAS_MAC_IPC_CHNL, 1162 PAS_MAC_IPC_CHNL_DCHNO(mac->rx->chan.chno) | 1163 PAS_MAC_IPC_CHNL_BCH(mac->rx->chan.chno)); 1164 1165 /* enable rx if */ 1166 write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 1167 PAS_DMA_RXINT_RCMDSTA_EN | 1168 PAS_DMA_RXINT_RCMDSTA_DROPS_M | 1169 PAS_DMA_RXINT_RCMDSTA_BP | 1170 PAS_DMA_RXINT_RCMDSTA_OO | 1171 PAS_DMA_RXINT_RCMDSTA_BT); 1172 1173 /* enable rx channel */ 1174 pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU | 1175 PAS_DMA_RXCHAN_CCMDSTA_OD | 1176 PAS_DMA_RXCHAN_CCMDSTA_FD | 1177 PAS_DMA_RXCHAN_CCMDSTA_DT); 1178 1179 /* enable tx channel */ 1180 pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ | 1181 PAS_DMA_TXCHAN_TCMDSTA_DB | 1182 PAS_DMA_TXCHAN_TCMDSTA_DE | 1183 PAS_DMA_TXCHAN_TCMDSTA_DA); 1184 1185 pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE); 1186 1187 write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno), 1188 RX_RING_SIZE>>1); 1189 1190 /* Clear out any residual packet count state from firmware */ 1191 pasemi_mac_restart_rx_intr(mac); 1192 pasemi_mac_restart_tx_intr(mac); 1193 1194 flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE; 1195 1196 if (mac->type == MAC_TYPE_GMAC) 1197 flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G; 1198 else 1199 flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G; 1200 1201 /* Enable interface in MAC */ 1202 write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); 1203 1204 ret = pasemi_mac_phy_init(dev); 1205 if (ret) { 1206 /* Since we won't get link notification, just enable RX */ 1207 pasemi_mac_intf_enable(mac); 1208 if (mac->type == MAC_TYPE_GMAC) { 1209 /* Warn for missing PHY on SGMII (1Gig) ports */ 1210 dev_warn(&mac->pdev->dev, 1211 "PHY init failed: %d.\n", ret); 1212 dev_warn(&mac->pdev->dev, 1213 "Defaulting to 1Gbit full duplex\n"); 1214 } 1215 } 1216 1217 netif_start_queue(dev); 1218 napi_enable(&mac->napi); 1219 1220 snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx", 1221 dev->name); 1222 1223 ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, IRQF_DISABLED, 1224 mac->tx_irq_name, mac->tx); 1225 if (ret) { 1226 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", 1227 mac->tx->chan.irq, ret); 1228 goto out_tx_int; 1229 } 1230 1231 snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx", 1232 dev->name); 1233 1234 ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, IRQF_DISABLED, 1235 mac->rx_irq_name, mac->rx); 1236 if (ret) { 1237 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", 1238 mac->rx->chan.irq, ret); 1239 goto out_rx_int; 1240 } 1241 1242 if (mac->phydev) 1243 phy_start(mac->phydev); 1244 1245 init_timer(&mac->tx->clean_timer); 1246 mac->tx->clean_timer.function = pasemi_mac_tx_timer; 1247 mac->tx->clean_timer.data = (unsigned long)mac->tx; 1248 mac->tx->clean_timer.expires = jiffies+HZ; 1249 add_timer(&mac->tx->clean_timer); 1250 1251 return 0; 1252 1253 out_rx_int: 1254 free_irq(mac->tx->chan.irq, mac->tx); 1255 out_tx_int: 1256 napi_disable(&mac->napi); 1257 netif_stop_queue(dev); 1258 out_tx_ring: 1259 if (mac->tx) 1260 pasemi_mac_free_tx_resources(mac); 1261 pasemi_mac_free_rx_resources(mac); 1262 out_rx_resources: 1263 1264 return ret; 1265 } 1266 1267 #define MAX_RETRIES 5000 1268 1269 static void pasemi_mac_pause_txchan(struct pasemi_mac *mac) 1270 { 1271 unsigned int sta, retries; 1272 int txch = tx_ring(mac)->chan.chno; 1273 1274 write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 1275 PAS_DMA_TXCHAN_TCMDSTA_ST); 1276 1277 for (retries = 0; retries < MAX_RETRIES; retries++) { 1278 sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch)); 1279 if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) 1280 break; 1281 cond_resched(); 1282 } 1283 1284 if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT) 1285 dev_err(&mac->dma_pdev->dev, 1286 "Failed to stop tx channel, tcmdsta %08x\n", sta); 1287 1288 write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0); 1289 } 1290 1291 static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac) 1292 { 1293 unsigned int sta, retries; 1294 int rxch = rx_ring(mac)->chan.chno; 1295 1296 write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 1297 PAS_DMA_RXCHAN_CCMDSTA_ST); 1298 for (retries = 0; retries < MAX_RETRIES; retries++) { 1299 sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); 1300 if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) 1301 break; 1302 cond_resched(); 1303 } 1304 1305 if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT) 1306 dev_err(&mac->dma_pdev->dev, 1307 "Failed to stop rx channel, ccmdsta 08%x\n", sta); 1308 write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0); 1309 } 1310 1311 static void pasemi_mac_pause_rxint(struct pasemi_mac *mac) 1312 { 1313 unsigned int sta, retries; 1314 1315 write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 1316 PAS_DMA_RXINT_RCMDSTA_ST); 1317 for (retries = 0; retries < MAX_RETRIES; retries++) { 1318 sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); 1319 if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT)) 1320 break; 1321 cond_resched(); 1322 } 1323 1324 if (sta & PAS_DMA_RXINT_RCMDSTA_ACT) 1325 dev_err(&mac->dma_pdev->dev, 1326 "Failed to stop rx interface, rcmdsta %08x\n", sta); 1327 write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); 1328 } 1329 1330 static int pasemi_mac_close(struct net_device *dev) 1331 { 1332 struct pasemi_mac *mac = netdev_priv(dev); 1333 unsigned int sta; 1334 int rxch, txch, i; 1335 1336 rxch = rx_ring(mac)->chan.chno; 1337 txch = tx_ring(mac)->chan.chno; 1338 1339 if (mac->phydev) { 1340 phy_stop(mac->phydev); 1341 phy_disconnect(mac->phydev); 1342 } 1343 1344 del_timer_sync(&mac->tx->clean_timer); 1345 1346 netif_stop_queue(dev); 1347 napi_disable(&mac->napi); 1348 1349 sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); 1350 if (sta & (PAS_DMA_RXINT_RCMDSTA_BP | 1351 PAS_DMA_RXINT_RCMDSTA_OO | 1352 PAS_DMA_RXINT_RCMDSTA_BT)) 1353 printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta); 1354 1355 sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); 1356 if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU | 1357 PAS_DMA_RXCHAN_CCMDSTA_OD | 1358 PAS_DMA_RXCHAN_CCMDSTA_FD | 1359 PAS_DMA_RXCHAN_CCMDSTA_DT)) 1360 printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta); 1361 1362 sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch)); 1363 if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB | 1364 PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA)) 1365 printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta); 1366 1367 /* Clean out any pending buffers */ 1368 pasemi_mac_clean_tx(tx_ring(mac)); 1369 pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); 1370 1371 pasemi_mac_pause_txchan(mac); 1372 pasemi_mac_pause_rxint(mac); 1373 pasemi_mac_pause_rxchan(mac); 1374 pasemi_mac_intf_disable(mac); 1375 1376 free_irq(mac->tx->chan.irq, mac->tx); 1377 free_irq(mac->rx->chan.irq, mac->rx); 1378 1379 for (i = 0; i < mac->num_cs; i++) { 1380 pasemi_mac_free_csring(mac->cs[i]); 1381 mac->cs[i] = NULL; 1382 } 1383 1384 mac->num_cs = 0; 1385 1386 /* Free resources */ 1387 pasemi_mac_free_rx_resources(mac); 1388 pasemi_mac_free_tx_resources(mac); 1389 1390 return 0; 1391 } 1392 1393 static void pasemi_mac_queue_csdesc(const struct sk_buff *skb, 1394 const dma_addr_t *map, 1395 const unsigned int *map_size, 1396 struct pasemi_mac_txring *txring, 1397 struct pasemi_mac_csring *csring) 1398 { 1399 u64 fund; 1400 dma_addr_t cs_dest; 1401 const int nh_off = skb_network_offset(skb); 1402 const int nh_len = skb_network_header_len(skb); 1403 const int nfrags = skb_shinfo(skb)->nr_frags; 1404 int cs_size, i, fill, hdr, cpyhdr, evt; 1405 dma_addr_t csdma; 1406 1407 fund = XCT_FUN_ST | XCT_FUN_RR_8BRES | 1408 XCT_FUN_O | XCT_FUN_FUN(csring->fun) | 1409 XCT_FUN_CRM_SIG | XCT_FUN_LLEN(skb->len - nh_off) | 1410 XCT_FUN_SHL(nh_len >> 2) | XCT_FUN_SE; 1411 1412 switch (ip_hdr(skb)->protocol) { 1413 case IPPROTO_TCP: 1414 fund |= XCT_FUN_SIG_TCP4; 1415 /* TCP checksum is 16 bytes into the header */ 1416 cs_dest = map[0] + skb_transport_offset(skb) + 16; 1417 break; 1418 case IPPROTO_UDP: 1419 fund |= XCT_FUN_SIG_UDP4; 1420 /* UDP checksum is 6 bytes into the header */ 1421 cs_dest = map[0] + skb_transport_offset(skb) + 6; 1422 break; 1423 default: 1424 BUG(); 1425 } 1426 1427 /* Do the checksum offloaded */ 1428 fill = csring->next_to_fill; 1429 hdr = fill; 1430 1431 CS_DESC(csring, fill++) = fund; 1432 /* Room for 8BRES. Checksum result is really 2 bytes into it */ 1433 csdma = csring->chan.ring_dma + (fill & (CS_RING_SIZE-1)) * 8 + 2; 1434 CS_DESC(csring, fill++) = 0; 1435 1436 CS_DESC(csring, fill) = XCT_PTR_LEN(map_size[0]-nh_off) | XCT_PTR_ADDR(map[0]+nh_off); 1437 for (i = 1; i <= nfrags; i++) 1438 CS_DESC(csring, fill+i) = XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]); 1439 1440 fill += i; 1441 if (fill & 1) 1442 fill++; 1443 1444 /* Copy the result into the TCP packet */ 1445 cpyhdr = fill; 1446 CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) | 1447 XCT_FUN_LLEN(2) | XCT_FUN_SE; 1448 CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T; 1449 CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(csdma); 1450 fill++; 1451 1452 evt = !csring->last_event; 1453 csring->last_event = evt; 1454 1455 /* Event handshaking with MAC TX */ 1456 CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O | 1457 CTRL_CMD_ETYPE_SET | CTRL_CMD_REG(csring->events[evt]); 1458 CS_DESC(csring, fill++) = 0; 1459 CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O | 1460 CTRL_CMD_ETYPE_WCLR | CTRL_CMD_REG(csring->events[!evt]); 1461 CS_DESC(csring, fill++) = 0; 1462 csring->next_to_fill = fill & (CS_RING_SIZE-1); 1463 1464 cs_size = fill - hdr; 1465 write_dma_reg(PAS_DMA_TXCHAN_INCR(csring->chan.chno), (cs_size) >> 1); 1466 1467 /* TX-side event handshaking */ 1468 fill = txring->next_to_fill; 1469 TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O | 1470 CTRL_CMD_ETYPE_WSET | CTRL_CMD_REG(csring->events[evt]); 1471 TX_DESC(txring, fill++) = 0; 1472 TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O | 1473 CTRL_CMD_ETYPE_CLR | CTRL_CMD_REG(csring->events[!evt]); 1474 TX_DESC(txring, fill++) = 0; 1475 txring->next_to_fill = fill; 1476 1477 write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2); 1478 } 1479 1480 static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) 1481 { 1482 struct pasemi_mac * const mac = netdev_priv(dev); 1483 struct pasemi_mac_txring * const txring = tx_ring(mac); 1484 struct pasemi_mac_csring *csring; 1485 u64 dflags = 0; 1486 u64 mactx; 1487 dma_addr_t map[MAX_SKB_FRAGS+1]; 1488 unsigned int map_size[MAX_SKB_FRAGS+1]; 1489 unsigned long flags; 1490 int i, nfrags; 1491 int fill; 1492 const int nh_off = skb_network_offset(skb); 1493 const int nh_len = skb_network_header_len(skb); 1494 1495 prefetch(&txring->ring_info); 1496 1497 dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD; 1498 1499 nfrags = skb_shinfo(skb)->nr_frags; 1500 1501 map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), 1502 PCI_DMA_TODEVICE); 1503 map_size[0] = skb_headlen(skb); 1504 if (pci_dma_mapping_error(mac->dma_pdev, map[0])) 1505 goto out_err_nolock; 1506 1507 for (i = 0; i < nfrags; i++) { 1508 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1509 1510 map[i + 1] = skb_frag_dma_map(&mac->dma_pdev->dev, frag, 0, 1511 skb_frag_size(frag), DMA_TO_DEVICE); 1512 map_size[i+1] = skb_frag_size(frag); 1513 if (dma_mapping_error(&mac->dma_pdev->dev, map[i + 1])) { 1514 nfrags = i; 1515 goto out_err_nolock; 1516 } 1517 } 1518 1519 if (skb->ip_summed == CHECKSUM_PARTIAL && skb->len <= 1540) { 1520 switch (ip_hdr(skb)->protocol) { 1521 case IPPROTO_TCP: 1522 dflags |= XCT_MACTX_CSUM_TCP; 1523 dflags |= XCT_MACTX_IPH(nh_len >> 2); 1524 dflags |= XCT_MACTX_IPO(nh_off); 1525 break; 1526 case IPPROTO_UDP: 1527 dflags |= XCT_MACTX_CSUM_UDP; 1528 dflags |= XCT_MACTX_IPH(nh_len >> 2); 1529 dflags |= XCT_MACTX_IPO(nh_off); 1530 break; 1531 default: 1532 WARN_ON(1); 1533 } 1534 } 1535 1536 mactx = dflags | XCT_MACTX_LLEN(skb->len); 1537 1538 spin_lock_irqsave(&txring->lock, flags); 1539 1540 /* Avoid stepping on the same cache line that the DMA controller 1541 * is currently about to send, so leave at least 8 words available. 1542 * Total free space needed is mactx + fragments + 8 1543 */ 1544 if (RING_AVAIL(txring) < nfrags + 14) { 1545 /* no room -- stop the queue and wait for tx intr */ 1546 netif_stop_queue(dev); 1547 goto out_err; 1548 } 1549 1550 /* Queue up checksum + event descriptors, if needed */ 1551 if (mac->num_cs && skb->ip_summed == CHECKSUM_PARTIAL && skb->len > 1540) { 1552 csring = mac->cs[mac->last_cs]; 1553 mac->last_cs = (mac->last_cs + 1) % mac->num_cs; 1554 1555 pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring); 1556 } 1557 1558 fill = txring->next_to_fill; 1559 TX_DESC(txring, fill) = mactx; 1560 TX_DESC_INFO(txring, fill).dma = nfrags; 1561 fill++; 1562 TX_DESC_INFO(txring, fill).skb = skb; 1563 for (i = 0; i <= nfrags; i++) { 1564 TX_DESC(txring, fill+i) = 1565 XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]); 1566 TX_DESC_INFO(txring, fill+i).dma = map[i]; 1567 } 1568 1569 /* We have to add an even number of 8-byte entries to the ring 1570 * even if the last one is unused. That means always an odd number 1571 * of pointers + one mactx descriptor. 1572 */ 1573 if (nfrags & 1) 1574 nfrags++; 1575 1576 txring->next_to_fill = (fill + nfrags + 1) & (TX_RING_SIZE-1); 1577 1578 dev->stats.tx_packets++; 1579 dev->stats.tx_bytes += skb->len; 1580 1581 spin_unlock_irqrestore(&txring->lock, flags); 1582 1583 write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), (nfrags+2) >> 1); 1584 1585 return NETDEV_TX_OK; 1586 1587 out_err: 1588 spin_unlock_irqrestore(&txring->lock, flags); 1589 out_err_nolock: 1590 while (nfrags--) 1591 pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags], 1592 PCI_DMA_TODEVICE); 1593 1594 return NETDEV_TX_BUSY; 1595 } 1596 1597 static void pasemi_mac_set_rx_mode(struct net_device *dev) 1598 { 1599 const struct pasemi_mac *mac = netdev_priv(dev); 1600 unsigned int flags; 1601 1602 flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); 1603 1604 /* Set promiscuous */ 1605 if (dev->flags & IFF_PROMISC) 1606 flags |= PAS_MAC_CFG_PCFG_PR; 1607 else 1608 flags &= ~PAS_MAC_CFG_PCFG_PR; 1609 1610 write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); 1611 } 1612 1613 1614 static int pasemi_mac_poll(struct napi_struct *napi, int budget) 1615 { 1616 struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi); 1617 int pkts; 1618 1619 pasemi_mac_clean_tx(tx_ring(mac)); 1620 pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); 1621 if (pkts < budget) { 1622 /* all done, no more packets present */ 1623 napi_complete(napi); 1624 1625 pasemi_mac_restart_rx_intr(mac); 1626 pasemi_mac_restart_tx_intr(mac); 1627 } 1628 return pkts; 1629 } 1630 1631 #ifdef CONFIG_NET_POLL_CONTROLLER 1632 /* 1633 * Polling 'interrupt' - used by things like netconsole to send skbs 1634 * without having to re-enable interrupts. It's not called while 1635 * the interrupt routine is executing. 1636 */ 1637 static void pasemi_mac_netpoll(struct net_device *dev) 1638 { 1639 const struct pasemi_mac *mac = netdev_priv(dev); 1640 1641 disable_irq(mac->tx->chan.irq); 1642 pasemi_mac_tx_intr(mac->tx->chan.irq, mac->tx); 1643 enable_irq(mac->tx->chan.irq); 1644 1645 disable_irq(mac->rx->chan.irq); 1646 pasemi_mac_rx_intr(mac->rx->chan.irq, mac->rx); 1647 enable_irq(mac->rx->chan.irq); 1648 } 1649 #endif 1650 1651 static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu) 1652 { 1653 struct pasemi_mac *mac = netdev_priv(dev); 1654 unsigned int reg; 1655 unsigned int rcmdsta = 0; 1656 int running; 1657 int ret = 0; 1658 1659 if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU) 1660 return -EINVAL; 1661 1662 running = netif_running(dev); 1663 1664 if (running) { 1665 /* Need to stop the interface, clean out all already 1666 * received buffers, free all unused buffers on the RX 1667 * interface ring, then finally re-fill the rx ring with 1668 * the new-size buffers and restart. 1669 */ 1670 1671 napi_disable(&mac->napi); 1672 netif_tx_disable(dev); 1673 pasemi_mac_intf_disable(mac); 1674 1675 rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); 1676 pasemi_mac_pause_rxint(mac); 1677 pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); 1678 pasemi_mac_free_rx_buffers(mac); 1679 1680 } 1681 1682 /* Setup checksum channels if large MTU and none already allocated */ 1683 if (new_mtu > 1500 && !mac->num_cs) { 1684 pasemi_mac_setup_csrings(mac); 1685 if (!mac->num_cs) { 1686 ret = -ENOMEM; 1687 goto out; 1688 } 1689 } 1690 1691 /* Change maxf, i.e. what size frames are accepted. 1692 * Need room for ethernet header and CRC word 1693 */ 1694 reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG); 1695 reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M; 1696 reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4); 1697 write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg); 1698 1699 dev->mtu = new_mtu; 1700 /* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ 1701 mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; 1702 1703 out: 1704 if (running) { 1705 write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 1706 rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN); 1707 1708 rx_ring(mac)->next_to_fill = 0; 1709 pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1); 1710 1711 napi_enable(&mac->napi); 1712 netif_start_queue(dev); 1713 pasemi_mac_intf_enable(mac); 1714 } 1715 1716 return ret; 1717 } 1718 1719 static const struct net_device_ops pasemi_netdev_ops = { 1720 .ndo_open = pasemi_mac_open, 1721 .ndo_stop = pasemi_mac_close, 1722 .ndo_start_xmit = pasemi_mac_start_tx, 1723 .ndo_set_rx_mode = pasemi_mac_set_rx_mode, 1724 .ndo_set_mac_address = pasemi_mac_set_mac_addr, 1725 .ndo_change_mtu = pasemi_mac_change_mtu, 1726 .ndo_validate_addr = eth_validate_addr, 1727 #ifdef CONFIG_NET_POLL_CONTROLLER 1728 .ndo_poll_controller = pasemi_mac_netpoll, 1729 #endif 1730 }; 1731 1732 static int 1733 pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1734 { 1735 struct net_device *dev; 1736 struct pasemi_mac *mac; 1737 int err, ret; 1738 1739 err = pci_enable_device(pdev); 1740 if (err) 1741 return err; 1742 1743 dev = alloc_etherdev(sizeof(struct pasemi_mac)); 1744 if (dev == NULL) { 1745 err = -ENOMEM; 1746 goto out_disable_device; 1747 } 1748 1749 pci_set_drvdata(pdev, dev); 1750 SET_NETDEV_DEV(dev, &pdev->dev); 1751 1752 mac = netdev_priv(dev); 1753 1754 mac->pdev = pdev; 1755 mac->netdev = dev; 1756 1757 netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64); 1758 1759 dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG | 1760 NETIF_F_HIGHDMA | NETIF_F_GSO; 1761 1762 mac->lro_mgr.max_aggr = LRO_MAX_AGGR; 1763 mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; 1764 mac->lro_mgr.lro_arr = mac->lro_desc; 1765 mac->lro_mgr.get_skb_header = get_skb_hdr; 1766 mac->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; 1767 mac->lro_mgr.dev = mac->netdev; 1768 mac->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; 1769 mac->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; 1770 1771 1772 mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); 1773 if (!mac->dma_pdev) { 1774 dev_err(&mac->pdev->dev, "Can't find DMA Controller\n"); 1775 err = -ENODEV; 1776 goto out; 1777 } 1778 1779 mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); 1780 if (!mac->iob_pdev) { 1781 dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n"); 1782 err = -ENODEV; 1783 goto out; 1784 } 1785 1786 /* get mac addr from device tree */ 1787 if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) { 1788 err = -ENODEV; 1789 goto out; 1790 } 1791 memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr)); 1792 1793 ret = mac_to_intf(mac); 1794 if (ret < 0) { 1795 dev_err(&mac->pdev->dev, "Can't map DMA interface\n"); 1796 err = -ENODEV; 1797 goto out; 1798 } 1799 mac->dma_if = ret; 1800 1801 switch (pdev->device) { 1802 case 0xa005: 1803 mac->type = MAC_TYPE_GMAC; 1804 break; 1805 case 0xa006: 1806 mac->type = MAC_TYPE_XAUI; 1807 break; 1808 default: 1809 err = -ENODEV; 1810 goto out; 1811 } 1812 1813 dev->netdev_ops = &pasemi_netdev_ops; 1814 dev->mtu = PE_DEF_MTU; 1815 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ 1816 mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; 1817 1818 dev->ethtool_ops = &pasemi_mac_ethtool_ops; 1819 1820 if (err) 1821 goto out; 1822 1823 mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 1824 1825 /* Enable most messages by default */ 1826 mac->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1827 1828 err = register_netdev(dev); 1829 1830 if (err) { 1831 dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n", 1832 err); 1833 goto out; 1834 } else if (netif_msg_probe(mac)) { 1835 printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %pM\n", 1836 dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI", 1837 mac->dma_if, dev->dev_addr); 1838 } 1839 1840 return err; 1841 1842 out: 1843 if (mac->iob_pdev) 1844 pci_dev_put(mac->iob_pdev); 1845 if (mac->dma_pdev) 1846 pci_dev_put(mac->dma_pdev); 1847 1848 free_netdev(dev); 1849 out_disable_device: 1850 pci_disable_device(pdev); 1851 return err; 1852 1853 } 1854 1855 static void pasemi_mac_remove(struct pci_dev *pdev) 1856 { 1857 struct net_device *netdev = pci_get_drvdata(pdev); 1858 struct pasemi_mac *mac; 1859 1860 if (!netdev) 1861 return; 1862 1863 mac = netdev_priv(netdev); 1864 1865 unregister_netdev(netdev); 1866 1867 pci_disable_device(pdev); 1868 pci_dev_put(mac->dma_pdev); 1869 pci_dev_put(mac->iob_pdev); 1870 1871 pasemi_dma_free_chan(&mac->tx->chan); 1872 pasemi_dma_free_chan(&mac->rx->chan); 1873 1874 pci_set_drvdata(pdev, NULL); 1875 free_netdev(netdev); 1876 } 1877 1878 static DEFINE_PCI_DEVICE_TABLE(pasemi_mac_pci_tbl) = { 1879 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) }, 1880 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) }, 1881 { }, 1882 }; 1883 1884 MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl); 1885 1886 static struct pci_driver pasemi_mac_driver = { 1887 .name = "pasemi_mac", 1888 .id_table = pasemi_mac_pci_tbl, 1889 .probe = pasemi_mac_probe, 1890 .remove = pasemi_mac_remove, 1891 }; 1892 1893 static void __exit pasemi_mac_cleanup_module(void) 1894 { 1895 pci_unregister_driver(&pasemi_mac_driver); 1896 } 1897 1898 int pasemi_mac_init_module(void) 1899 { 1900 int err; 1901 1902 err = pasemi_dma_init(); 1903 if (err) 1904 return err; 1905 1906 return pci_register_driver(&pasemi_mac_driver); 1907 } 1908 1909 module_init(pasemi_mac_init_module); 1910 module_exit(pasemi_mac_cleanup_module); 1911