1 /* 2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. 3 * 4 * Copyright (c) 2003 Intracom S.A. 5 * by Pantelis Antoniou <panto@intracom.gr> 6 * 7 * 2005 (c) MontaVista Software, Inc. 8 * Vitaly Bordug <vbordug@ru.mvista.com> 9 * 10 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com> 11 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se> 12 * 13 * This file is licensed under the terms of the GNU General Public License 14 * version 2. This program is licensed "as is" without any warranty of any 15 * kind, whether express or implied. 16 */ 17 18 #include <linux/module.h> 19 #include <linux/kernel.h> 20 #include <linux/types.h> 21 #include <linux/string.h> 22 #include <linux/ptrace.h> 23 #include <linux/errno.h> 24 #include <linux/ioport.h> 25 #include <linux/slab.h> 26 #include <linux/interrupt.h> 27 #include <linux/init.h> 28 #include <linux/delay.h> 29 #include <linux/netdevice.h> 30 #include <linux/etherdevice.h> 31 #include <linux/skbuff.h> 32 #include <linux/spinlock.h> 33 #include <linux/mii.h> 34 #include <linux/ethtool.h> 35 #include <linux/bitops.h> 36 #include <linux/fs.h> 37 #include <linux/platform_device.h> 38 #include <linux/phy.h> 39 #include <linux/of.h> 40 #include <linux/of_mdio.h> 41 #include <linux/of_platform.h> 42 #include <linux/of_gpio.h> 43 #include <linux/of_net.h> 44 45 #include <linux/vmalloc.h> 46 #include <asm/pgtable.h> 47 #include <asm/irq.h> 48 #include <asm/uaccess.h> 49 50 #include "fs_enet.h" 51 52 /*************************************************/ 53 54 MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>"); 55 MODULE_DESCRIPTION("Freescale Ethernet Driver"); 56 MODULE_LICENSE("GPL"); 57 MODULE_VERSION(DRV_MODULE_VERSION); 58 59 static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */ 60 module_param(fs_enet_debug, int, 0); 61 MODULE_PARM_DESC(fs_enet_debug, 62 "Freescale bitmapped debugging message enable value"); 63 64 #ifdef CONFIG_NET_POLL_CONTROLLER 65 static void fs_enet_netpoll(struct net_device *dev); 66 #endif 67 68 static void fs_set_multicast_list(struct net_device *dev) 69 { 70 struct fs_enet_private *fep = netdev_priv(dev); 71 72 (*fep->ops->set_multicast_list)(dev); 73 } 74 75 static void skb_align(struct sk_buff *skb, int align) 76 { 77 int off = ((unsigned long)skb->data) & (align - 1); 78 79 if (off) 80 skb_reserve(skb, align - off); 81 } 82 83 /* NAPI receive function */ 84 static int fs_enet_rx_napi(struct napi_struct *napi, int budget) 85 { 86 struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi); 87 struct net_device *dev = fep->ndev; 88 const struct fs_platform_info *fpi = fep->fpi; 89 cbd_t __iomem *bdp; 90 struct sk_buff *skb, *skbn, *skbt; 91 int received = 0; 92 u16 pkt_len, sc; 93 int curidx; 94 95 /* 96 * First, grab all of the stats for the incoming packet. 97 * These get messed up if we get called due to a busy condition. 98 */ 99 bdp = fep->cur_rx; 100 101 /* clear RX status bits for napi*/ 102 (*fep->ops->napi_clear_rx_event)(dev); 103 104 while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { 105 curidx = bdp - fep->rx_bd_base; 106 107 /* 108 * Since we have allocated space to hold a complete frame, 109 * the last indicator should be set. 110 */ 111 if ((sc & BD_ENET_RX_LAST) == 0) 112 dev_warn(fep->dev, "rcv is not +last\n"); 113 114 /* 115 * Check for errors. 116 */ 117 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | 118 BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { 119 fep->stats.rx_errors++; 120 /* Frame too long or too short. */ 121 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) 122 fep->stats.rx_length_errors++; 123 /* Frame alignment */ 124 if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL)) 125 fep->stats.rx_frame_errors++; 126 /* CRC Error */ 127 if (sc & BD_ENET_RX_CR) 128 fep->stats.rx_crc_errors++; 129 /* FIFO overrun */ 130 if (sc & BD_ENET_RX_OV) 131 fep->stats.rx_crc_errors++; 132 133 skb = fep->rx_skbuff[curidx]; 134 135 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 136 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 137 DMA_FROM_DEVICE); 138 139 skbn = skb; 140 141 } else { 142 skb = fep->rx_skbuff[curidx]; 143 144 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 145 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 146 DMA_FROM_DEVICE); 147 148 /* 149 * Process the incoming frame. 150 */ 151 fep->stats.rx_packets++; 152 pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */ 153 fep->stats.rx_bytes += pkt_len + 4; 154 155 if (pkt_len <= fpi->rx_copybreak) { 156 /* +2 to make IP header L1 cache aligned */ 157 skbn = netdev_alloc_skb(dev, pkt_len + 2); 158 if (skbn != NULL) { 159 skb_reserve(skbn, 2); /* align IP header */ 160 skb_copy_from_linear_data(skb, 161 skbn->data, pkt_len); 162 /* swap */ 163 skbt = skb; 164 skb = skbn; 165 skbn = skbt; 166 } 167 } else { 168 skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE); 169 170 if (skbn) 171 skb_align(skbn, ENET_RX_ALIGN); 172 } 173 174 if (skbn != NULL) { 175 skb_put(skb, pkt_len); /* Make room */ 176 skb->protocol = eth_type_trans(skb, dev); 177 received++; 178 netif_receive_skb(skb); 179 } else { 180 fep->stats.rx_dropped++; 181 skbn = skb; 182 } 183 } 184 185 fep->rx_skbuff[curidx] = skbn; 186 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data, 187 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 188 DMA_FROM_DEVICE)); 189 CBDW_DATLEN(bdp, 0); 190 CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); 191 192 /* 193 * Update BD pointer to next entry. 194 */ 195 if ((sc & BD_ENET_RX_WRAP) == 0) 196 bdp++; 197 else 198 bdp = fep->rx_bd_base; 199 200 (*fep->ops->rx_bd_done)(dev); 201 202 if (received >= budget) 203 break; 204 } 205 206 fep->cur_rx = bdp; 207 208 if (received < budget) { 209 /* done */ 210 napi_complete(napi); 211 (*fep->ops->napi_enable_rx)(dev); 212 } 213 return received; 214 } 215 216 /* non NAPI receive function */ 217 static int fs_enet_rx_non_napi(struct net_device *dev) 218 { 219 struct fs_enet_private *fep = netdev_priv(dev); 220 const struct fs_platform_info *fpi = fep->fpi; 221 cbd_t __iomem *bdp; 222 struct sk_buff *skb, *skbn, *skbt; 223 int received = 0; 224 u16 pkt_len, sc; 225 int curidx; 226 /* 227 * First, grab all of the stats for the incoming packet. 228 * These get messed up if we get called due to a busy condition. 229 */ 230 bdp = fep->cur_rx; 231 232 while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { 233 234 curidx = bdp - fep->rx_bd_base; 235 236 /* 237 * Since we have allocated space to hold a complete frame, 238 * the last indicator should be set. 239 */ 240 if ((sc & BD_ENET_RX_LAST) == 0) 241 dev_warn(fep->dev, "rcv is not +last\n"); 242 243 /* 244 * Check for errors. 245 */ 246 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | 247 BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { 248 fep->stats.rx_errors++; 249 /* Frame too long or too short. */ 250 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) 251 fep->stats.rx_length_errors++; 252 /* Frame alignment */ 253 if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL)) 254 fep->stats.rx_frame_errors++; 255 /* CRC Error */ 256 if (sc & BD_ENET_RX_CR) 257 fep->stats.rx_crc_errors++; 258 /* FIFO overrun */ 259 if (sc & BD_ENET_RX_OV) 260 fep->stats.rx_crc_errors++; 261 262 skb = fep->rx_skbuff[curidx]; 263 264 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 265 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 266 DMA_FROM_DEVICE); 267 268 skbn = skb; 269 270 } else { 271 272 skb = fep->rx_skbuff[curidx]; 273 274 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 275 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 276 DMA_FROM_DEVICE); 277 278 /* 279 * Process the incoming frame. 280 */ 281 fep->stats.rx_packets++; 282 pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */ 283 fep->stats.rx_bytes += pkt_len + 4; 284 285 if (pkt_len <= fpi->rx_copybreak) { 286 /* +2 to make IP header L1 cache aligned */ 287 skbn = netdev_alloc_skb(dev, pkt_len + 2); 288 if (skbn != NULL) { 289 skb_reserve(skbn, 2); /* align IP header */ 290 skb_copy_from_linear_data(skb, 291 skbn->data, pkt_len); 292 /* swap */ 293 skbt = skb; 294 skb = skbn; 295 skbn = skbt; 296 } 297 } else { 298 skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE); 299 300 if (skbn) 301 skb_align(skbn, ENET_RX_ALIGN); 302 } 303 304 if (skbn != NULL) { 305 skb_put(skb, pkt_len); /* Make room */ 306 skb->protocol = eth_type_trans(skb, dev); 307 received++; 308 netif_rx(skb); 309 } else { 310 fep->stats.rx_dropped++; 311 skbn = skb; 312 } 313 } 314 315 fep->rx_skbuff[curidx] = skbn; 316 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data, 317 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 318 DMA_FROM_DEVICE)); 319 CBDW_DATLEN(bdp, 0); 320 CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); 321 322 /* 323 * Update BD pointer to next entry. 324 */ 325 if ((sc & BD_ENET_RX_WRAP) == 0) 326 bdp++; 327 else 328 bdp = fep->rx_bd_base; 329 330 (*fep->ops->rx_bd_done)(dev); 331 } 332 333 fep->cur_rx = bdp; 334 335 return 0; 336 } 337 338 static void fs_enet_tx(struct net_device *dev) 339 { 340 struct fs_enet_private *fep = netdev_priv(dev); 341 cbd_t __iomem *bdp; 342 struct sk_buff *skb; 343 int dirtyidx, do_wake, do_restart; 344 u16 sc; 345 346 spin_lock(&fep->tx_lock); 347 bdp = fep->dirty_tx; 348 349 do_wake = do_restart = 0; 350 while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) { 351 dirtyidx = bdp - fep->tx_bd_base; 352 353 if (fep->tx_free == fep->tx_ring) 354 break; 355 356 skb = fep->tx_skbuff[dirtyidx]; 357 358 /* 359 * Check for errors. 360 */ 361 if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC | 362 BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { 363 364 if (sc & BD_ENET_TX_HB) /* No heartbeat */ 365 fep->stats.tx_heartbeat_errors++; 366 if (sc & BD_ENET_TX_LC) /* Late collision */ 367 fep->stats.tx_window_errors++; 368 if (sc & BD_ENET_TX_RL) /* Retrans limit */ 369 fep->stats.tx_aborted_errors++; 370 if (sc & BD_ENET_TX_UN) /* Underrun */ 371 fep->stats.tx_fifo_errors++; 372 if (sc & BD_ENET_TX_CSL) /* Carrier lost */ 373 fep->stats.tx_carrier_errors++; 374 375 if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) { 376 fep->stats.tx_errors++; 377 do_restart = 1; 378 } 379 } else 380 fep->stats.tx_packets++; 381 382 if (sc & BD_ENET_TX_READY) { 383 dev_warn(fep->dev, 384 "HEY! Enet xmit interrupt and TX_READY.\n"); 385 } 386 387 /* 388 * Deferred means some collisions occurred during transmit, 389 * but we eventually sent the packet OK. 390 */ 391 if (sc & BD_ENET_TX_DEF) 392 fep->stats.collisions++; 393 394 /* unmap */ 395 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 396 skb->len, DMA_TO_DEVICE); 397 398 /* 399 * Free the sk buffer associated with this last transmit. 400 */ 401 dev_kfree_skb_irq(skb); 402 fep->tx_skbuff[dirtyidx] = NULL; 403 404 /* 405 * Update pointer to next buffer descriptor to be transmitted. 406 */ 407 if ((sc & BD_ENET_TX_WRAP) == 0) 408 bdp++; 409 else 410 bdp = fep->tx_bd_base; 411 412 /* 413 * Since we have freed up a buffer, the ring is no longer 414 * full. 415 */ 416 if (!fep->tx_free++) 417 do_wake = 1; 418 } 419 420 fep->dirty_tx = bdp; 421 422 if (do_restart) 423 (*fep->ops->tx_restart)(dev); 424 425 spin_unlock(&fep->tx_lock); 426 427 if (do_wake) 428 netif_wake_queue(dev); 429 } 430 431 /* 432 * The interrupt handler. 433 * This is called from the MPC core interrupt. 434 */ 435 static irqreturn_t 436 fs_enet_interrupt(int irq, void *dev_id) 437 { 438 struct net_device *dev = dev_id; 439 struct fs_enet_private *fep; 440 const struct fs_platform_info *fpi; 441 u32 int_events; 442 u32 int_clr_events; 443 int nr, napi_ok; 444 int handled; 445 446 fep = netdev_priv(dev); 447 fpi = fep->fpi; 448 449 nr = 0; 450 while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) { 451 nr++; 452 453 int_clr_events = int_events; 454 if (fpi->use_napi) 455 int_clr_events &= ~fep->ev_napi_rx; 456 457 (*fep->ops->clear_int_events)(dev, int_clr_events); 458 459 if (int_events & fep->ev_err) 460 (*fep->ops->ev_error)(dev, int_events); 461 462 if (int_events & fep->ev_rx) { 463 if (!fpi->use_napi) 464 fs_enet_rx_non_napi(dev); 465 else { 466 napi_ok = napi_schedule_prep(&fep->napi); 467 468 (*fep->ops->napi_disable_rx)(dev); 469 (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx); 470 471 /* NOTE: it is possible for FCCs in NAPI mode */ 472 /* to submit a spurious interrupt while in poll */ 473 if (napi_ok) 474 __napi_schedule(&fep->napi); 475 } 476 } 477 478 if (int_events & fep->ev_tx) 479 fs_enet_tx(dev); 480 } 481 482 handled = nr > 0; 483 return IRQ_RETVAL(handled); 484 } 485 486 void fs_init_bds(struct net_device *dev) 487 { 488 struct fs_enet_private *fep = netdev_priv(dev); 489 cbd_t __iomem *bdp; 490 struct sk_buff *skb; 491 int i; 492 493 fs_cleanup_bds(dev); 494 495 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; 496 fep->tx_free = fep->tx_ring; 497 fep->cur_rx = fep->rx_bd_base; 498 499 /* 500 * Initialize the receive buffer descriptors. 501 */ 502 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { 503 skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE); 504 if (skb == NULL) 505 break; 506 507 skb_align(skb, ENET_RX_ALIGN); 508 fep->rx_skbuff[i] = skb; 509 CBDW_BUFADDR(bdp, 510 dma_map_single(fep->dev, skb->data, 511 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 512 DMA_FROM_DEVICE)); 513 CBDW_DATLEN(bdp, 0); /* zero */ 514 CBDW_SC(bdp, BD_ENET_RX_EMPTY | 515 ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP)); 516 } 517 /* 518 * if we failed, fillup remainder 519 */ 520 for (; i < fep->rx_ring; i++, bdp++) { 521 fep->rx_skbuff[i] = NULL; 522 CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP); 523 } 524 525 /* 526 * ...and the same for transmit. 527 */ 528 for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { 529 fep->tx_skbuff[i] = NULL; 530 CBDW_BUFADDR(bdp, 0); 531 CBDW_DATLEN(bdp, 0); 532 CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP); 533 } 534 } 535 536 void fs_cleanup_bds(struct net_device *dev) 537 { 538 struct fs_enet_private *fep = netdev_priv(dev); 539 struct sk_buff *skb; 540 cbd_t __iomem *bdp; 541 int i; 542 543 /* 544 * Reset SKB transmit buffers. 545 */ 546 for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { 547 if ((skb = fep->tx_skbuff[i]) == NULL) 548 continue; 549 550 /* unmap */ 551 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 552 skb->len, DMA_TO_DEVICE); 553 554 fep->tx_skbuff[i] = NULL; 555 dev_kfree_skb(skb); 556 } 557 558 /* 559 * Reset SKB receive buffers 560 */ 561 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { 562 if ((skb = fep->rx_skbuff[i]) == NULL) 563 continue; 564 565 /* unmap */ 566 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 567 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 568 DMA_FROM_DEVICE); 569 570 fep->rx_skbuff[i] = NULL; 571 572 dev_kfree_skb(skb); 573 } 574 } 575 576 /**********************************************************************************/ 577 578 #ifdef CONFIG_FS_ENET_MPC5121_FEC 579 /* 580 * MPC5121 FEC requeries 4-byte alignment for TX data buffer! 581 */ 582 static struct sk_buff *tx_skb_align_workaround(struct net_device *dev, 583 struct sk_buff *skb) 584 { 585 struct sk_buff *new_skb; 586 587 /* Alloc new skb */ 588 new_skb = netdev_alloc_skb(dev, skb->len + 4); 589 if (!new_skb) 590 return NULL; 591 592 /* Make sure new skb is properly aligned */ 593 skb_align(new_skb, 4); 594 595 /* Copy data to new skb ... */ 596 skb_copy_from_linear_data(skb, new_skb->data, skb->len); 597 skb_put(new_skb, skb->len); 598 599 /* ... and free an old one */ 600 dev_kfree_skb_any(skb); 601 602 return new_skb; 603 } 604 #endif 605 606 static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 607 { 608 struct fs_enet_private *fep = netdev_priv(dev); 609 cbd_t __iomem *bdp; 610 int curidx; 611 u16 sc; 612 unsigned long flags; 613 614 #ifdef CONFIG_FS_ENET_MPC5121_FEC 615 if (((unsigned long)skb->data) & 0x3) { 616 skb = tx_skb_align_workaround(dev, skb); 617 if (!skb) { 618 /* 619 * We have lost packet due to memory allocation error 620 * in tx_skb_align_workaround(). Hopefully original 621 * skb is still valid, so try transmit it later. 622 */ 623 return NETDEV_TX_BUSY; 624 } 625 } 626 #endif 627 spin_lock_irqsave(&fep->tx_lock, flags); 628 629 /* 630 * Fill in a Tx ring entry 631 */ 632 bdp = fep->cur_tx; 633 634 if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { 635 netif_stop_queue(dev); 636 spin_unlock_irqrestore(&fep->tx_lock, flags); 637 638 /* 639 * Ooops. All transmit buffers are full. Bail out. 640 * This should not happen, since the tx queue should be stopped. 641 */ 642 dev_warn(fep->dev, "tx queue full!.\n"); 643 return NETDEV_TX_BUSY; 644 } 645 646 curidx = bdp - fep->tx_bd_base; 647 /* 648 * Clear all of the status flags. 649 */ 650 CBDC_SC(bdp, BD_ENET_TX_STATS); 651 652 /* 653 * Save skb pointer. 654 */ 655 fep->tx_skbuff[curidx] = skb; 656 657 fep->stats.tx_bytes += skb->len; 658 659 /* 660 * Push the data cache so the CPM does not get stale memory data. 661 */ 662 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, 663 skb->data, skb->len, DMA_TO_DEVICE)); 664 CBDW_DATLEN(bdp, skb->len); 665 666 /* 667 * If this was the last BD in the ring, start at the beginning again. 668 */ 669 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) 670 fep->cur_tx++; 671 else 672 fep->cur_tx = fep->tx_bd_base; 673 674 if (!--fep->tx_free) 675 netif_stop_queue(dev); 676 677 /* Trigger transmission start */ 678 sc = BD_ENET_TX_READY | BD_ENET_TX_INTR | 679 BD_ENET_TX_LAST | BD_ENET_TX_TC; 680 681 /* note that while FEC does not have this bit 682 * it marks it as available for software use 683 * yay for hw reuse :) */ 684 if (skb->len <= 60) 685 sc |= BD_ENET_TX_PAD; 686 CBDS_SC(bdp, sc); 687 688 skb_tx_timestamp(skb); 689 690 (*fep->ops->tx_kickstart)(dev); 691 692 spin_unlock_irqrestore(&fep->tx_lock, flags); 693 694 return NETDEV_TX_OK; 695 } 696 697 static void fs_timeout(struct net_device *dev) 698 { 699 struct fs_enet_private *fep = netdev_priv(dev); 700 unsigned long flags; 701 int wake = 0; 702 703 fep->stats.tx_errors++; 704 705 spin_lock_irqsave(&fep->lock, flags); 706 707 if (dev->flags & IFF_UP) { 708 phy_stop(fep->phydev); 709 (*fep->ops->stop)(dev); 710 (*fep->ops->restart)(dev); 711 phy_start(fep->phydev); 712 } 713 714 phy_start(fep->phydev); 715 wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); 716 spin_unlock_irqrestore(&fep->lock, flags); 717 718 if (wake) 719 netif_wake_queue(dev); 720 } 721 722 /*----------------------------------------------------------------------------- 723 * generic link-change handler - should be sufficient for most cases 724 *-----------------------------------------------------------------------------*/ 725 static void generic_adjust_link(struct net_device *dev) 726 { 727 struct fs_enet_private *fep = netdev_priv(dev); 728 struct phy_device *phydev = fep->phydev; 729 int new_state = 0; 730 731 if (phydev->link) { 732 /* adjust to duplex mode */ 733 if (phydev->duplex != fep->oldduplex) { 734 new_state = 1; 735 fep->oldduplex = phydev->duplex; 736 } 737 738 if (phydev->speed != fep->oldspeed) { 739 new_state = 1; 740 fep->oldspeed = phydev->speed; 741 } 742 743 if (!fep->oldlink) { 744 new_state = 1; 745 fep->oldlink = 1; 746 } 747 748 if (new_state) 749 fep->ops->restart(dev); 750 } else if (fep->oldlink) { 751 new_state = 1; 752 fep->oldlink = 0; 753 fep->oldspeed = 0; 754 fep->oldduplex = -1; 755 } 756 757 if (new_state && netif_msg_link(fep)) 758 phy_print_status(phydev); 759 } 760 761 762 static void fs_adjust_link(struct net_device *dev) 763 { 764 struct fs_enet_private *fep = netdev_priv(dev); 765 unsigned long flags; 766 767 spin_lock_irqsave(&fep->lock, flags); 768 769 if(fep->ops->adjust_link) 770 fep->ops->adjust_link(dev); 771 else 772 generic_adjust_link(dev); 773 774 spin_unlock_irqrestore(&fep->lock, flags); 775 } 776 777 static int fs_init_phy(struct net_device *dev) 778 { 779 struct fs_enet_private *fep = netdev_priv(dev); 780 struct phy_device *phydev; 781 phy_interface_t iface; 782 783 fep->oldlink = 0; 784 fep->oldspeed = 0; 785 fep->oldduplex = -1; 786 787 iface = fep->fpi->use_rmii ? 788 PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII; 789 790 phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0, 791 iface); 792 if (!phydev) { 793 phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link, 794 iface); 795 } 796 if (!phydev) { 797 dev_err(&dev->dev, "Could not attach to PHY\n"); 798 return -ENODEV; 799 } 800 801 fep->phydev = phydev; 802 803 return 0; 804 } 805 806 static int fs_enet_open(struct net_device *dev) 807 { 808 struct fs_enet_private *fep = netdev_priv(dev); 809 int r; 810 int err; 811 812 /* to initialize the fep->cur_rx,... */ 813 /* not doing this, will cause a crash in fs_enet_rx_napi */ 814 fs_init_bds(fep->ndev); 815 816 if (fep->fpi->use_napi) 817 napi_enable(&fep->napi); 818 819 /* Install our interrupt handler. */ 820 r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED, 821 "fs_enet-mac", dev); 822 if (r != 0) { 823 dev_err(fep->dev, "Could not allocate FS_ENET IRQ!"); 824 if (fep->fpi->use_napi) 825 napi_disable(&fep->napi); 826 return -EINVAL; 827 } 828 829 err = fs_init_phy(dev); 830 if (err) { 831 free_irq(fep->interrupt, dev); 832 if (fep->fpi->use_napi) 833 napi_disable(&fep->napi); 834 return err; 835 } 836 phy_start(fep->phydev); 837 838 netif_start_queue(dev); 839 840 return 0; 841 } 842 843 static int fs_enet_close(struct net_device *dev) 844 { 845 struct fs_enet_private *fep = netdev_priv(dev); 846 unsigned long flags; 847 848 netif_stop_queue(dev); 849 netif_carrier_off(dev); 850 if (fep->fpi->use_napi) 851 napi_disable(&fep->napi); 852 phy_stop(fep->phydev); 853 854 spin_lock_irqsave(&fep->lock, flags); 855 spin_lock(&fep->tx_lock); 856 (*fep->ops->stop)(dev); 857 spin_unlock(&fep->tx_lock); 858 spin_unlock_irqrestore(&fep->lock, flags); 859 860 /* release any irqs */ 861 phy_disconnect(fep->phydev); 862 fep->phydev = NULL; 863 free_irq(fep->interrupt, dev); 864 865 return 0; 866 } 867 868 static struct net_device_stats *fs_enet_get_stats(struct net_device *dev) 869 { 870 struct fs_enet_private *fep = netdev_priv(dev); 871 return &fep->stats; 872 } 873 874 /*************************************************************************/ 875 876 static void fs_get_drvinfo(struct net_device *dev, 877 struct ethtool_drvinfo *info) 878 { 879 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 880 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); 881 } 882 883 static int fs_get_regs_len(struct net_device *dev) 884 { 885 struct fs_enet_private *fep = netdev_priv(dev); 886 887 return (*fep->ops->get_regs_len)(dev); 888 } 889 890 static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, 891 void *p) 892 { 893 struct fs_enet_private *fep = netdev_priv(dev); 894 unsigned long flags; 895 int r, len; 896 897 len = regs->len; 898 899 spin_lock_irqsave(&fep->lock, flags); 900 r = (*fep->ops->get_regs)(dev, p, &len); 901 spin_unlock_irqrestore(&fep->lock, flags); 902 903 if (r == 0) 904 regs->version = 0; 905 } 906 907 static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 908 { 909 struct fs_enet_private *fep = netdev_priv(dev); 910 911 if (!fep->phydev) 912 return -ENODEV; 913 914 return phy_ethtool_gset(fep->phydev, cmd); 915 } 916 917 static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 918 { 919 struct fs_enet_private *fep = netdev_priv(dev); 920 921 if (!fep->phydev) 922 return -ENODEV; 923 924 return phy_ethtool_sset(fep->phydev, cmd); 925 } 926 927 static int fs_nway_reset(struct net_device *dev) 928 { 929 return 0; 930 } 931 932 static u32 fs_get_msglevel(struct net_device *dev) 933 { 934 struct fs_enet_private *fep = netdev_priv(dev); 935 return fep->msg_enable; 936 } 937 938 static void fs_set_msglevel(struct net_device *dev, u32 value) 939 { 940 struct fs_enet_private *fep = netdev_priv(dev); 941 fep->msg_enable = value; 942 } 943 944 static const struct ethtool_ops fs_ethtool_ops = { 945 .get_drvinfo = fs_get_drvinfo, 946 .get_regs_len = fs_get_regs_len, 947 .get_settings = fs_get_settings, 948 .set_settings = fs_set_settings, 949 .nway_reset = fs_nway_reset, 950 .get_link = ethtool_op_get_link, 951 .get_msglevel = fs_get_msglevel, 952 .set_msglevel = fs_set_msglevel, 953 .get_regs = fs_get_regs, 954 .get_ts_info = ethtool_op_get_ts_info, 955 }; 956 957 static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 958 { 959 struct fs_enet_private *fep = netdev_priv(dev); 960 961 if (!netif_running(dev)) 962 return -EINVAL; 963 964 return phy_mii_ioctl(fep->phydev, rq, cmd); 965 } 966 967 extern int fs_mii_connect(struct net_device *dev); 968 extern void fs_mii_disconnect(struct net_device *dev); 969 970 /**************************************************************************************/ 971 972 #ifdef CONFIG_FS_ENET_HAS_FEC 973 #define IS_FEC(match) ((match)->data == &fs_fec_ops) 974 #else 975 #define IS_FEC(match) 0 976 #endif 977 978 static const struct net_device_ops fs_enet_netdev_ops = { 979 .ndo_open = fs_enet_open, 980 .ndo_stop = fs_enet_close, 981 .ndo_get_stats = fs_enet_get_stats, 982 .ndo_start_xmit = fs_enet_start_xmit, 983 .ndo_tx_timeout = fs_timeout, 984 .ndo_set_rx_mode = fs_set_multicast_list, 985 .ndo_do_ioctl = fs_ioctl, 986 .ndo_validate_addr = eth_validate_addr, 987 .ndo_set_mac_address = eth_mac_addr, 988 .ndo_change_mtu = eth_change_mtu, 989 #ifdef CONFIG_NET_POLL_CONTROLLER 990 .ndo_poll_controller = fs_enet_netpoll, 991 #endif 992 }; 993 994 static struct of_device_id fs_enet_match[]; 995 static int fs_enet_probe(struct platform_device *ofdev) 996 { 997 const struct of_device_id *match; 998 struct net_device *ndev; 999 struct fs_enet_private *fep; 1000 struct fs_platform_info *fpi; 1001 const u32 *data; 1002 struct clk *clk; 1003 int err; 1004 const u8 *mac_addr; 1005 const char *phy_connection_type; 1006 int privsize, len, ret = -ENODEV; 1007 1008 match = of_match_device(fs_enet_match, &ofdev->dev); 1009 if (!match) 1010 return -EINVAL; 1011 1012 fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); 1013 if (!fpi) 1014 return -ENOMEM; 1015 1016 if (!IS_FEC(match)) { 1017 data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); 1018 if (!data || len != 4) 1019 goto out_free_fpi; 1020 1021 fpi->cp_command = *data; 1022 } 1023 1024 fpi->rx_ring = 32; 1025 fpi->tx_ring = 32; 1026 fpi->rx_copybreak = 240; 1027 fpi->use_napi = 1; 1028 fpi->napi_weight = 17; 1029 fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); 1030 if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link", 1031 NULL))) 1032 goto out_free_fpi; 1033 1034 if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) { 1035 phy_connection_type = of_get_property(ofdev->dev.of_node, 1036 "phy-connection-type", NULL); 1037 if (phy_connection_type && !strcmp("rmii", phy_connection_type)) 1038 fpi->use_rmii = 1; 1039 } 1040 1041 /* make clock lookup non-fatal (the driver is shared among platforms), 1042 * but require enable to succeed when a clock was specified/found, 1043 * keep a reference to the clock upon successful acquisition 1044 */ 1045 clk = devm_clk_get(&ofdev->dev, "per"); 1046 if (!IS_ERR(clk)) { 1047 err = clk_prepare_enable(clk); 1048 if (err) { 1049 ret = err; 1050 goto out_free_fpi; 1051 } 1052 fpi->clk_per = clk; 1053 } 1054 1055 privsize = sizeof(*fep) + 1056 sizeof(struct sk_buff **) * 1057 (fpi->rx_ring + fpi->tx_ring); 1058 1059 ndev = alloc_etherdev(privsize); 1060 if (!ndev) { 1061 ret = -ENOMEM; 1062 goto out_put; 1063 } 1064 1065 SET_NETDEV_DEV(ndev, &ofdev->dev); 1066 platform_set_drvdata(ofdev, ndev); 1067 1068 fep = netdev_priv(ndev); 1069 fep->dev = &ofdev->dev; 1070 fep->ndev = ndev; 1071 fep->fpi = fpi; 1072 fep->ops = match->data; 1073 1074 ret = fep->ops->setup_data(ndev); 1075 if (ret) 1076 goto out_free_dev; 1077 1078 fep->rx_skbuff = (struct sk_buff **)&fep[1]; 1079 fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; 1080 1081 spin_lock_init(&fep->lock); 1082 spin_lock_init(&fep->tx_lock); 1083 1084 mac_addr = of_get_mac_address(ofdev->dev.of_node); 1085 if (mac_addr) 1086 memcpy(ndev->dev_addr, mac_addr, 6); 1087 1088 ret = fep->ops->allocate_bd(ndev); 1089 if (ret) 1090 goto out_cleanup_data; 1091 1092 fep->rx_bd_base = fep->ring_base; 1093 fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring; 1094 1095 fep->tx_ring = fpi->tx_ring; 1096 fep->rx_ring = fpi->rx_ring; 1097 1098 ndev->netdev_ops = &fs_enet_netdev_ops; 1099 ndev->watchdog_timeo = 2 * HZ; 1100 if (fpi->use_napi) 1101 netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, 1102 fpi->napi_weight); 1103 1104 ndev->ethtool_ops = &fs_ethtool_ops; 1105 1106 init_timer(&fep->phy_timer_list); 1107 1108 netif_carrier_off(ndev); 1109 1110 ret = register_netdev(ndev); 1111 if (ret) 1112 goto out_free_bd; 1113 1114 pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr); 1115 1116 return 0; 1117 1118 out_free_bd: 1119 fep->ops->free_bd(ndev); 1120 out_cleanup_data: 1121 fep->ops->cleanup_data(ndev); 1122 out_free_dev: 1123 free_netdev(ndev); 1124 out_put: 1125 of_node_put(fpi->phy_node); 1126 if (fpi->clk_per) 1127 clk_disable_unprepare(fpi->clk_per); 1128 out_free_fpi: 1129 kfree(fpi); 1130 return ret; 1131 } 1132 1133 static int fs_enet_remove(struct platform_device *ofdev) 1134 { 1135 struct net_device *ndev = platform_get_drvdata(ofdev); 1136 struct fs_enet_private *fep = netdev_priv(ndev); 1137 1138 unregister_netdev(ndev); 1139 1140 fep->ops->free_bd(ndev); 1141 fep->ops->cleanup_data(ndev); 1142 dev_set_drvdata(fep->dev, NULL); 1143 of_node_put(fep->fpi->phy_node); 1144 if (fep->fpi->clk_per) 1145 clk_disable_unprepare(fep->fpi->clk_per); 1146 free_netdev(ndev); 1147 return 0; 1148 } 1149 1150 static struct of_device_id fs_enet_match[] = { 1151 #ifdef CONFIG_FS_ENET_HAS_SCC 1152 { 1153 .compatible = "fsl,cpm1-scc-enet", 1154 .data = (void *)&fs_scc_ops, 1155 }, 1156 { 1157 .compatible = "fsl,cpm2-scc-enet", 1158 .data = (void *)&fs_scc_ops, 1159 }, 1160 #endif 1161 #ifdef CONFIG_FS_ENET_HAS_FCC 1162 { 1163 .compatible = "fsl,cpm2-fcc-enet", 1164 .data = (void *)&fs_fcc_ops, 1165 }, 1166 #endif 1167 #ifdef CONFIG_FS_ENET_HAS_FEC 1168 #ifdef CONFIG_FS_ENET_MPC5121_FEC 1169 { 1170 .compatible = "fsl,mpc5121-fec", 1171 .data = (void *)&fs_fec_ops, 1172 }, 1173 { 1174 .compatible = "fsl,mpc5125-fec", 1175 .data = (void *)&fs_fec_ops, 1176 }, 1177 #else 1178 { 1179 .compatible = "fsl,pq1-fec-enet", 1180 .data = (void *)&fs_fec_ops, 1181 }, 1182 #endif 1183 #endif 1184 {} 1185 }; 1186 MODULE_DEVICE_TABLE(of, fs_enet_match); 1187 1188 static struct platform_driver fs_enet_driver = { 1189 .driver = { 1190 .owner = THIS_MODULE, 1191 .name = "fs_enet", 1192 .of_match_table = fs_enet_match, 1193 }, 1194 .probe = fs_enet_probe, 1195 .remove = fs_enet_remove, 1196 }; 1197 1198 #ifdef CONFIG_NET_POLL_CONTROLLER 1199 static void fs_enet_netpoll(struct net_device *dev) 1200 { 1201 disable_irq(dev->irq); 1202 fs_enet_interrupt(dev->irq, dev); 1203 enable_irq(dev->irq); 1204 } 1205 #endif 1206 1207 module_platform_driver(fs_enet_driver); 1208