/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>

#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"

/*************************************************/

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
		 "Freescale bitmapped debugging message enable value");

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif

static void fs_set_multicast_list(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->set_multicast_list)(dev);
}

static void skb_align(struct sk_buff *skb, int align)
{
	int off = ((unsigned long)skb->data) & (align - 1);

	if (off)
		skb_reserve(skb, align - off);
}
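/*
 * RX path overview: received frames arrive in a ring of buffer descriptors
 * shared with the controller.  For each completed descriptor, the poll
 * routine below either copies short frames (<= rx_copybreak) into a small
 * freshly allocated skb and recycles the original buffer, or passes the
 * full-size buffer up the stack and maps a replacement in its place, before
 * marking the descriptor empty again for the hardware.
 */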
/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
	struct net_device *dev = fep->ndev;
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	if (budget <= 0)
		return received;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi*/
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			dev_warn(fep->dev, "rcv is not +last\n");

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = netdev_alloc_skb(dev, pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);

		if (received >= budget)
			break;
	}

	fep->cur_rx = bdp;

	if (received < budget) {
		/* done */
		napi_complete(napi);
		(*fep->ops->napi_enable_rx)(dev);
	}
	return received;
}
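/*
 * TX completion (NAPI): walk the dirty part of the TX descriptor ring,
 * record error statistics, unmap and free the transmitted skbs, and wake
 * the queue once descriptors are available again.  The transmitter is
 * restarted when a late collision, retry-limit or underrun error was
 * reported.
 */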
static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private,
			napi_tx);
	struct net_device *dev = fep->ndev;
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;
	int has_tx_work = 0;

	spin_lock(&fep->tx_lock);
	bdp = fep->dirty_tx;

	/* clear TX status bits for napi*/
	(*fep->ops->napi_clear_tx_event)(dev);

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY) {
			dev_warn(fep->dev,
				 "HEY! Enet xmit interrupt and TX_READY.\n");
		}

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
		has_tx_work = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	if (!has_tx_work) {
		napi_complete(napi);
		(*fep->ops->napi_enable_tx)(dev);
	}

	spin_unlock(&fep->tx_lock);

	if (do_wake)
		netif_wake_queue(dev);

	if (has_tx_work)
		return budget;
	return 0;
}
/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	nr = 0;
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
		nr++;

		int_clr_events = int_events;
		int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			napi_ok = napi_schedule_prep(&fep->napi);

			(*fep->ops->napi_disable_rx)(dev);
			(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

			/* NOTE: it is possible for FCCs in NAPI mode */
			/* to submit a spurious interrupt while in poll */
			if (napi_ok)
				__napi_schedule(&fep->napi);
		}

		if (int_events & fep->ev_tx) {
			napi_ok = napi_schedule_prep(&fep->napi_tx);

			(*fep->ops->napi_disable_tx)(dev);
			(*fep->ops->clear_int_events)(dev, fep->ev_napi_tx);

			/* NOTE: it is possible for FCCs in NAPI mode */
			/* to submit a spurious interrupt while in poll */
			if (napi_ok)
				__napi_schedule(&fep->napi_tx);
		}
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}
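/*
 * (Re)build both descriptor rings: allocate and map a receive buffer for
 * every RX descriptor and mark it empty, and clear out the TX ring.  Any
 * buffers still attached from a previous run are released first via
 * fs_cleanup_bds().
 */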
void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
		if (skb == NULL)
			break;

		skb_align(skb, ENET_RX_ALIGN);
		fep->rx_skbuff[i] = skb;
		CBDW_BUFADDR(bdp,
			dma_map_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}
	/*
	 * if we failed, fill up the remainder
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}

void fs_cleanup_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct sk_buff *skb;
	cbd_t __iomem *bdp;
	int i;

	/*
	 * Reset SKB transmit buffers.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		if ((skb = fep->tx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				skb->len, DMA_TO_DEVICE);

		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * Reset SKB receive buffers
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		if ((skb = fep->rx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
			L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			DMA_FROM_DEVICE);

		fep->rx_skbuff[i] = NULL;

		dev_kfree_skb(skb);
	}
}

/**********************************************************************************/

#ifdef CONFIG_FS_ENET_MPC5121_FEC
/*
 * MPC5121 FEC requires 4-byte alignment for TX data buffer!
 */
static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct sk_buff *new_skb;

	/* Alloc new skb */
	new_skb = netdev_alloc_skb(dev, skb->len + 4);
	if (!new_skb)
		return NULL;

	/* Make sure new skb is properly aligned */
	skb_align(new_skb, 4);

	/* Copy data to new skb ... */
	skb_copy_from_linear_data(skb, new_skb->data, skb->len);
	skb_put(new_skb, skb->len);

	/* ... and free the old one */
	dev_kfree_skb_any(skb);

	return new_skb;
}
#endif
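/*
 * Queue one skb for transmission: claim the next free TX descriptor, map
 * the data for DMA and hand the descriptor to the controller with the
 * READY/INTR/LAST bits set.  The queue is stopped when the last free
 * descriptor is consumed and woken again from TX completion.
 */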
static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	int curidx;
	u16 sc;

#ifdef CONFIG_FS_ENET_MPC5121_FEC
	if (((unsigned long)skb->data) & 0x3) {
		skb = tx_skb_align_workaround(dev, skb);
		if (!skb) {
			/*
			 * We have lost packet due to memory allocation error
			 * in tx_skb_align_workaround(). Hopefully original
			 * skb is still valid, so try transmit it later.
			 */
			return NETDEV_TX_BUSY;
		}
	}
#endif
	spin_lock(&fep->tx_lock);

	/*
	 * Fill in a Tx ring entry
	 */
	bdp = fep->cur_tx;

	if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock(&fep->tx_lock);

		/*
		 * Oops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		dev_warn(fep->dev, "tx queue full!\n");
		return NETDEV_TX_BUSY;
	}

	curidx = bdp - fep->tx_bd_base;
	/*
	 * Clear all of the status flags.
	 */
	CBDC_SC(bdp, BD_ENET_TX_STATS);

	/*
	 * Save skb pointer.
	 */
	fep->tx_skbuff[curidx] = skb;

	fep->stats.tx_bytes += skb->len;

	/*
	 * Push the data cache so the CPM does not get stale memory data.
	 */
	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
				skb->data, skb->len, DMA_TO_DEVICE));
	CBDW_DATLEN(bdp, skb->len);

	/*
	 * If this was the last BD in the ring, start at the beginning again.
	 */
	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
		fep->cur_tx++;
	else
		fep->cur_tx = fep->tx_bd_base;

	if (!--fep->tx_free)
		netif_stop_queue(dev);

	/* Trigger transmission start */
	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
	     BD_ENET_TX_LAST | BD_ENET_TX_TC;

	/* note that while FEC does not have this bit
	 * it marks it as available for software use
	 * yay for hw reuse :) */
	if (skb->len <= 60)
		sc |= BD_ENET_TX_PAD;
	CBDS_SC(bdp, sc);

	skb_tx_timestamp(skb);

	(*fep->ops->tx_kickstart)(dev);

	spin_unlock(&fep->tx_lock);

	return NETDEV_TX_OK;
}

static void fs_timeout(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int wake = 0;

	fep->stats.tx_errors++;

	spin_lock_irqsave(&fep->lock, flags);

	if (dev->flags & IFF_UP) {
		phy_stop(fep->phydev);
		(*fep->ops->stop)(dev);
		(*fep->ops->restart)(dev);
		phy_start(fep->phydev);
	}

	wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (wake)
		netif_wake_queue(dev);
}

/*-----------------------------------------------------------------------------
 *  generic link-change handler - should be sufficient for most cases
 *-----------------------------------------------------------------------------*/
static void generic_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phydev;
	int new_state = 0;

	if (phydev->link) {
		/* adjust to duplex mode */
		if (phydev->duplex != fep->oldduplex) {
			new_state = 1;
			fep->oldduplex = phydev->duplex;
		}

		if (phydev->speed != fep->oldspeed) {
			new_state = 1;
			fep->oldspeed = phydev->speed;
		}

		if (!fep->oldlink) {
			new_state = 1;
			fep->oldlink = 1;
		}

		if (new_state)
			fep->ops->restart(dev);
	} else if (fep->oldlink) {
		new_state = 1;
		fep->oldlink = 0;
		fep->oldspeed = 0;
		fep->oldduplex = -1;
	}

	if (new_state && netif_msg_link(fep))
		phy_print_status(phydev);
}


static void fs_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&fep->lock, flags);

	if (fep->ops->adjust_link)
		fep->ops->adjust_link(dev);
	else
		generic_adjust_link(dev);

	spin_unlock_irqrestore(&fep->lock, flags);
}

static int fs_init_phy(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev;
	phy_interface_t iface;

	fep->oldlink = 0;
	fep->oldspeed = 0;
	fep->oldduplex = -1;

	iface = fep->fpi->use_rmii ?
		PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII;

	phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
				iface);
	if (!phydev) {
		dev_err(&dev->dev, "Could not attach to PHY\n");
		return -ENODEV;
	}

	fep->phydev = phydev;

	return 0;
}
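/*
 * Bring the interface up: rebuild the descriptor rings, enable both NAPI
 * contexts, install the interrupt handler, connect to the PHY and start
 * the transmit queue.  fs_enet_close() undoes these steps.
 */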
static int fs_enet_open(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int r;
	int err;

	/* to initialize the fep->cur_rx,... */
	/* not doing this, will cause a crash in fs_enet_rx_napi */
	fs_init_bds(fep->ndev);

	napi_enable(&fep->napi);
	napi_enable(&fep->napi_tx);

	/* Install our interrupt handler. */
	r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
			"fs_enet-mac", dev);
	if (r != 0) {
		dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
		napi_disable(&fep->napi);
		napi_disable(&fep->napi_tx);
		return -EINVAL;
	}

	err = fs_init_phy(dev);
	if (err) {
		free_irq(fep->interrupt, dev);
		napi_disable(&fep->napi);
		napi_disable(&fep->napi_tx);
		return err;
	}
	phy_start(fep->phydev);

	netif_start_queue(dev);

	return 0;
}

static int fs_enet_close(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	netif_carrier_off(dev);
	napi_disable(&fep->napi);
	napi_disable(&fep->napi_tx);
	phy_stop(fep->phydev);

	spin_lock_irqsave(&fep->lock, flags);
	spin_lock(&fep->tx_lock);
	(*fep->ops->stop)(dev);
	spin_unlock(&fep->tx_lock);
	spin_unlock_irqrestore(&fep->lock, flags);

	/* release any irqs */
	phy_disconnect(fep->phydev);
	fep->phydev = NULL;
	free_irq(fep->interrupt, dev);

	return 0;
}

static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return &fep->stats;
}

/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}

static int fs_get_regs_len(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			 void *p)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int r, len;

	len = regs->len;

	spin_lock_irqsave(&fep->lock, flags);
	r = (*fep->ops->get_regs)(dev, p, &len);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (r == 0)
		regs->version = 0;
}

static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!fep->phydev)
		return -ENODEV;

	return phy_ethtool_gset(fep->phydev, cmd);
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!fep->phydev)
		return -ENODEV;

	return phy_ethtool_sset(fep->phydev, cmd);
}

static int fs_nway_reset(struct net_device *dev)
{
	return 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fep->msg_enable = value;
}

static const struct ethtool_ops fs_ethtool_ops = {
	.get_drvinfo = fs_get_drvinfo,
	.get_regs_len = fs_get_regs_len,
	.get_settings = fs_get_settings,
	.set_settings = fs_set_settings,
	.nway_reset = fs_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_msglevel = fs_get_msglevel,
	.set_msglevel = fs_set_msglevel,
	.get_regs = fs_get_regs,
	.get_ts_info = ethtool_op_get_ts_info,
};

static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_mii_ioctl(fep->phydev, rq, cmd);
}

extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);

/**************************************************************************************/

#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(match) ((match)->data == &fs_fec_ops)
#else
#define IS_FEC(match) 0
#endif

static const struct net_device_ops fs_enet_netdev_ops = {
	.ndo_open		= fs_enet_open,
	.ndo_stop		= fs_enet_close,
	.ndo_get_stats		= fs_enet_get_stats,
	.ndo_start_xmit		= fs_enet_start_xmit,
	.ndo_tx_timeout		= fs_timeout,
	.ndo_set_rx_mode	= fs_set_multicast_list,
	.ndo_do_ioctl		= fs_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fs_enet_netpoll,
#endif
};

static struct of_device_id fs_enet_match[];
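/*
 * Probe: match the compatible string to a MAC-type ops structure (SCC, FCC
 * or FEC), parse the device tree (CPM command, PHY handle or fixed link,
 * RMII selection, optional "per" clock), allocate the net_device with the
 * RX/TX skb pointer arrays appended to the private area, allocate the
 * descriptor rings and register the interface.
 */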
static int fs_enet_probe(struct platform_device *ofdev)
{
	const struct of_device_id *match;
	struct net_device *ndev;
	struct fs_enet_private *fep;
	struct fs_platform_info *fpi;
	const u32 *data;
	struct clk *clk;
	int err;
	const u8 *mac_addr;
	const char *phy_connection_type;
	int privsize, len, ret = -ENODEV;

	match = of_match_device(fs_enet_match, &ofdev->dev);
	if (!match)
		return -EINVAL;

	fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
	if (!fpi)
		return -ENOMEM;

	if (!IS_FEC(match)) {
		data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
		if (!data || len != 4)
			goto out_free_fpi;

		fpi->cp_command = *data;
	}

	fpi->rx_ring = 32;
	fpi->tx_ring = 32;
	fpi->rx_copybreak = 240;
	fpi->napi_weight = 17;
	fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
	if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
		err = of_phy_register_fixed_link(ofdev->dev.of_node);
		if (err)
			goto out_free_fpi;

		/* In the case of a fixed PHY, the DT node associated
		 * to the PHY is the Ethernet MAC DT node.
		 */
		fpi->phy_node = of_node_get(ofdev->dev.of_node);
	}

	if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
		phy_connection_type = of_get_property(ofdev->dev.of_node,
						"phy-connection-type", NULL);
		if (phy_connection_type && !strcmp("rmii", phy_connection_type))
			fpi->use_rmii = 1;
	}

	/* make clock lookup non-fatal (the driver is shared among platforms),
	 * but require enable to succeed when a clock was specified/found,
	 * keep a reference to the clock upon successful acquisition
	 */
	clk = devm_clk_get(&ofdev->dev, "per");
	if (!IS_ERR(clk)) {
		err = clk_prepare_enable(clk);
		if (err) {
			ret = err;
			goto out_free_fpi;
		}
		fpi->clk_per = clk;
	}

	privsize = sizeof(*fep) +
		   sizeof(struct sk_buff **) *
		     (fpi->rx_ring + fpi->tx_ring);

	ndev = alloc_etherdev(privsize);
	if (!ndev) {
		ret = -ENOMEM;
		goto out_put;
	}

	SET_NETDEV_DEV(ndev, &ofdev->dev);
	platform_set_drvdata(ofdev, ndev);

	fep = netdev_priv(ndev);
	fep->dev = &ofdev->dev;
	fep->ndev = ndev;
	fep->fpi = fpi;
	fep->ops = match->data;

	ret = fep->ops->setup_data(ndev);
	if (ret)
		goto out_free_dev;

	fep->rx_skbuff = (struct sk_buff **)&fep[1];
	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

	spin_lock_init(&fep->lock);
	spin_lock_init(&fep->tx_lock);

	mac_addr = of_get_mac_address(ofdev->dev.of_node);
	if (mac_addr)
		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);

	ret = fep->ops->allocate_bd(ndev);
	if (ret)
		goto out_cleanup_data;

	fep->rx_bd_base = fep->ring_base;
	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

	fep->tx_ring = fpi->tx_ring;
	fep->rx_ring = fpi->rx_ring;

	ndev->netdev_ops = &fs_enet_netdev_ops;
	ndev->watchdog_timeo = 2 * HZ;
	netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight);
	netif_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2);

	ndev->ethtool_ops = &fs_ethtool_ops;

	init_timer(&fep->phy_timer_list);

	netif_carrier_off(ndev);

	ret = register_netdev(ndev);
	if (ret)
		goto out_free_bd;

	pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);

	return 0;

out_free_bd:
	fep->ops->free_bd(ndev);
out_cleanup_data:
	fep->ops->cleanup_data(ndev);
out_free_dev:
	free_netdev(ndev);
out_put:
	of_node_put(fpi->phy_node);
	if (fpi->clk_per)
		clk_disable_unprepare(fpi->clk_per);
out_free_fpi:
	kfree(fpi);
	return ret;
}

static int fs_enet_remove(struct platform_device *ofdev)
{
	struct net_device *ndev = platform_get_drvdata(ofdev);
	struct fs_enet_private *fep = netdev_priv(ndev);

	unregister_netdev(ndev);

	fep->ops->free_bd(ndev);
	fep->ops->cleanup_data(ndev);
	dev_set_drvdata(fep->dev, NULL);
	of_node_put(fep->fpi->phy_node);
	if (fep->fpi->clk_per)
		clk_disable_unprepare(fep->fpi->clk_per);
	free_netdev(ndev);
	return 0;
}
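/*
 * The OF match table selects the MAC-specific operations through the .data
 * pointer: SCC, FCC or FEC variants, depending on which controller support
 * is enabled in the kernel configuration.
 */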
static struct of_device_id fs_enet_match[] = {
#ifdef CONFIG_FS_ENET_HAS_SCC
	{
		.compatible = "fsl,cpm1-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
	{
		.compatible = "fsl,cpm2-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FCC
	{
		.compatible = "fsl,cpm2-fcc-enet",
		.data = (void *)&fs_fcc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FEC
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	{
		.compatible = "fsl,mpc5121-fec",
		.data = (void *)&fs_fec_ops,
	},
	{
		.compatible = "fsl,mpc5125-fec",
		.data = (void *)&fs_fec_ops,
	},
#else
	{
		.compatible = "fsl,pq1-fec-enet",
		.data = (void *)&fs_fec_ops,
	},
#endif
#endif
	{}
};
MODULE_DEVICE_TABLE(of, fs_enet_match);

static struct platform_driver fs_enet_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "fs_enet",
		.of_match_table = fs_enet_match,
	},
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
};
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	fs_enet_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

module_platform_driver(fs_enet_driver);