1 /* 2 * Driver for BCM963xx builtin Ethernet mac 3 * 4 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 */ 20 #include <linux/init.h> 21 #include <linux/interrupt.h> 22 #include <linux/module.h> 23 #include <linux/clk.h> 24 #include <linux/etherdevice.h> 25 #include <linux/slab.h> 26 #include <linux/delay.h> 27 #include <linux/ethtool.h> 28 #include <linux/crc32.h> 29 #include <linux/err.h> 30 #include <linux/dma-mapping.h> 31 #include <linux/platform_device.h> 32 #include <linux/if_vlan.h> 33 34 #include <bcm63xx_dev_enet.h> 35 #include "bcm63xx_enet.h" 36 37 static char bcm_enet_driver_name[] = "bcm63xx_enet"; 38 static char bcm_enet_driver_version[] = "1.0"; 39 40 static int copybreak __read_mostly = 128; 41 module_param(copybreak, int, 0); 42 MODULE_PARM_DESC(copybreak, "Receive copy threshold"); 43 44 /* io registers memory shared between all devices */ 45 static void __iomem *bcm_enet_shared_base[3]; 46 47 /* 48 * io helpers to access mac registers 49 */ 50 static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off) 51 { 52 return bcm_readl(priv->base + off); 53 } 54 55 static inline void enet_writel(struct bcm_enet_priv *priv, 56 u32 val, u32 off) 57 { 58 bcm_writel(val, priv->base + off); 59 } 60 61 /* 62 * io helpers to access switch registers 63 */ 64 static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off) 65 { 66 return bcm_readl(priv->base + off); 67 } 68 69 static inline void enetsw_writel(struct bcm_enet_priv *priv, 70 u32 val, u32 off) 71 { 72 bcm_writel(val, priv->base + off); 73 } 74 75 static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off) 76 { 77 return bcm_readw(priv->base + off); 78 } 79 80 static inline void enetsw_writew(struct bcm_enet_priv *priv, 81 u16 val, u32 off) 82 { 83 bcm_writew(val, priv->base + off); 84 } 85 86 static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off) 87 { 88 return bcm_readb(priv->base + off); 89 } 90 91 static inline void enetsw_writeb(struct bcm_enet_priv *priv, 92 u8 val, u32 off) 93 { 94 bcm_writeb(val, priv->base + off); 95 } 96 97 98 /* io helpers to access shared registers */ 99 static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off) 100 { 101 return bcm_readl(bcm_enet_shared_base[0] + off); 102 } 103 104 static inline void enet_dma_writel(struct bcm_enet_priv *priv, 105 u32 val, u32 off) 106 { 107 bcm_writel(val, bcm_enet_shared_base[0] + off); 108 } 109 110 static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan) 111 { 112 return bcm_readl(bcm_enet_shared_base[1] + 113 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); 114 } 115 116 static inline void enet_dmac_writel(struct bcm_enet_priv *priv, 117 u32 val, u32 off, int chan) 118 { 119 bcm_writel(val, bcm_enet_shared_base[1] + 120 bcm63xx_enetdmacreg(off) + chan * 
priv->dma_chan_width); 121 } 122 123 static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan) 124 { 125 return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); 126 } 127 128 static inline void enet_dmas_writel(struct bcm_enet_priv *priv, 129 u32 val, u32 off, int chan) 130 { 131 bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); 132 } 133 134 /* 135 * write given data into mii register and wait for transfer to end 136 * with timeout (average measured transfer time is 25us) 137 */ 138 static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data) 139 { 140 int limit; 141 142 /* make sure mii interrupt status is cleared */ 143 enet_writel(priv, ENET_IR_MII, ENET_IR_REG); 144 145 enet_writel(priv, data, ENET_MIIDATA_REG); 146 wmb(); 147 148 /* busy wait on mii interrupt bit, with timeout */ 149 limit = 1000; 150 do { 151 if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII) 152 break; 153 udelay(1); 154 } while (limit-- > 0); 155 156 return (limit < 0) ? 1 : 0; 157 } 158 159 /* 160 * MII internal read callback 161 */ 162 static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id, 163 int regnum) 164 { 165 u32 tmp, val; 166 167 tmp = regnum << ENET_MIIDATA_REG_SHIFT; 168 tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; 169 tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; 170 tmp |= ENET_MIIDATA_OP_READ_MASK; 171 172 if (do_mdio_op(priv, tmp)) 173 return -1; 174 175 val = enet_readl(priv, ENET_MIIDATA_REG); 176 val &= 0xffff; 177 return val; 178 } 179 180 /* 181 * MII internal write callback 182 */ 183 static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id, 184 int regnum, u16 value) 185 { 186 u32 tmp; 187 188 tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT; 189 tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; 190 tmp |= regnum << ENET_MIIDATA_REG_SHIFT; 191 tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; 192 tmp |= ENET_MIIDATA_OP_WRITE_MASK; 193 194 (void)do_mdio_op(priv, tmp); 195 return 0; 196 } 197 198 /* 199 * MII read callback from phylib 200 */ 201 static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id, 202 int regnum) 203 { 204 return bcm_enet_mdio_read(bus->priv, mii_id, regnum); 205 } 206 207 /* 208 * MII write callback from phylib 209 */ 210 static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id, 211 int regnum, u16 value) 212 { 213 return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value); 214 } 215 216 /* 217 * MII read callback from mii core 218 */ 219 static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id, 220 int regnum) 221 { 222 return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum); 223 } 224 225 /* 226 * MII write callback from mii core 227 */ 228 static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id, 229 int regnum, int value) 230 { 231 bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value); 232 } 233 234 /* 235 * refill rx queue 236 */ 237 static int bcm_enet_refill_rx(struct net_device *dev) 238 { 239 struct bcm_enet_priv *priv; 240 241 priv = netdev_priv(dev); 242 243 while (priv->rx_desc_count < priv->rx_ring_size) { 244 struct bcm_enet_desc *desc; 245 struct sk_buff *skb; 246 dma_addr_t p; 247 int desc_idx; 248 u32 len_stat; 249 250 desc_idx = priv->rx_dirty_desc; 251 desc = &priv->rx_desc_cpu[desc_idx]; 252 253 if (!priv->rx_skb[desc_idx]) { 254 skb = netdev_alloc_skb(dev, priv->rx_skb_size); 255 if (!skb) 256 break; 257 priv->rx_skb[desc_idx] = skb; 258 p = dma_map_single(&priv->pdev->dev, skb->data, 259 priv->rx_skb_size, 260 
DMA_FROM_DEVICE); 261 desc->address = p; 262 } 263 264 len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT; 265 len_stat |= DMADESC_OWNER_MASK; 266 if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { 267 len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); 268 priv->rx_dirty_desc = 0; 269 } else { 270 priv->rx_dirty_desc++; 271 } 272 wmb(); 273 desc->len_stat = len_stat; 274 275 priv->rx_desc_count++; 276 277 /* tell dma engine we allocated one buffer */ 278 if (priv->dma_has_sram) 279 enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); 280 else 281 enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan); 282 } 283 284 /* If rx ring is still empty, set a timer to try allocating 285 * again at a later time. */ 286 if (priv->rx_desc_count == 0 && netif_running(dev)) { 287 dev_warn(&priv->pdev->dev, "unable to refill rx ring\n"); 288 priv->rx_timeout.expires = jiffies + HZ; 289 add_timer(&priv->rx_timeout); 290 } 291 292 return 0; 293 } 294 295 /* 296 * timer callback to defer refill rx queue in case we're OOM 297 */ 298 static void bcm_enet_refill_rx_timer(struct timer_list *t) 299 { 300 struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout); 301 struct net_device *dev = priv->net_dev; 302 303 spin_lock(&priv->rx_lock); 304 bcm_enet_refill_rx(dev); 305 spin_unlock(&priv->rx_lock); 306 } 307 308 /* 309 * extract packet from rx queue 310 */ 311 static int bcm_enet_receive_queue(struct net_device *dev, int budget) 312 { 313 struct bcm_enet_priv *priv; 314 struct device *kdev; 315 int processed; 316 317 priv = netdev_priv(dev); 318 kdev = &priv->pdev->dev; 319 processed = 0; 320 321 /* don't scan ring further than number of refilled 322 * descriptor */ 323 if (budget > priv->rx_desc_count) 324 budget = priv->rx_desc_count; 325 326 do { 327 struct bcm_enet_desc *desc; 328 struct sk_buff *skb; 329 int desc_idx; 330 u32 len_stat; 331 unsigned int len; 332 333 desc_idx = priv->rx_curr_desc; 334 desc = &priv->rx_desc_cpu[desc_idx]; 335 336 /* make sure we actually read the descriptor status at 337 * each loop */ 338 rmb(); 339 340 len_stat = desc->len_stat; 341 342 /* break if dma ownership belongs to hw */ 343 if (len_stat & DMADESC_OWNER_MASK) 344 break; 345 346 processed++; 347 priv->rx_curr_desc++; 348 if (priv->rx_curr_desc == priv->rx_ring_size) 349 priv->rx_curr_desc = 0; 350 priv->rx_desc_count--; 351 352 /* if the packet does not have start of packet _and_ 353 * end of packet flag set, then just recycle it */ 354 if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) != 355 (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) { 356 dev->stats.rx_dropped++; 357 continue; 358 } 359 360 /* recycle packet if it's marked as bad */ 361 if (!priv->enet_is_sw && 362 unlikely(len_stat & DMADESC_ERR_MASK)) { 363 dev->stats.rx_errors++; 364 365 if (len_stat & DMADESC_OVSIZE_MASK) 366 dev->stats.rx_length_errors++; 367 if (len_stat & DMADESC_CRC_MASK) 368 dev->stats.rx_crc_errors++; 369 if (len_stat & DMADESC_UNDER_MASK) 370 dev->stats.rx_frame_errors++; 371 if (len_stat & DMADESC_OV_MASK) 372 dev->stats.rx_fifo_errors++; 373 continue; 374 } 375 376 /* valid packet */ 377 skb = priv->rx_skb[desc_idx]; 378 len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT; 379 /* don't include FCS */ 380 len -= 4; 381 382 if (len < copybreak) { 383 struct sk_buff *nskb; 384 385 nskb = napi_alloc_skb(&priv->napi, len); 386 if (!nskb) { 387 /* forget packet, just rearm desc */ 388 dev->stats.rx_dropped++; 389 continue; 390 } 391 392 dma_sync_single_for_cpu(kdev, desc->address, 393 len, 
DMA_FROM_DEVICE); 394 memcpy(nskb->data, skb->data, len); 395 dma_sync_single_for_device(kdev, desc->address, 396 len, DMA_FROM_DEVICE); 397 skb = nskb; 398 } else { 399 dma_unmap_single(&priv->pdev->dev, desc->address, 400 priv->rx_skb_size, DMA_FROM_DEVICE); 401 priv->rx_skb[desc_idx] = NULL; 402 } 403 404 skb_put(skb, len); 405 skb->protocol = eth_type_trans(skb, dev); 406 dev->stats.rx_packets++; 407 dev->stats.rx_bytes += len; 408 netif_receive_skb(skb); 409 410 } while (--budget > 0); 411 412 if (processed || !priv->rx_desc_count) { 413 bcm_enet_refill_rx(dev); 414 415 /* kick rx dma */ 416 enet_dmac_writel(priv, priv->dma_chan_en_mask, 417 ENETDMAC_CHANCFG, priv->rx_chan); 418 } 419 420 return processed; 421 } 422 423 424 /* 425 * try to or force reclaim of transmitted buffers 426 */ 427 static int bcm_enet_tx_reclaim(struct net_device *dev, int force) 428 { 429 struct bcm_enet_priv *priv; 430 int released; 431 432 priv = netdev_priv(dev); 433 released = 0; 434 435 while (priv->tx_desc_count < priv->tx_ring_size) { 436 struct bcm_enet_desc *desc; 437 struct sk_buff *skb; 438 439 /* We run in a bh and fight against start_xmit, which 440 * is called with bh disabled */ 441 spin_lock(&priv->tx_lock); 442 443 desc = &priv->tx_desc_cpu[priv->tx_dirty_desc]; 444 445 if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) { 446 spin_unlock(&priv->tx_lock); 447 break; 448 } 449 450 /* ensure other field of the descriptor were not read 451 * before we checked ownership */ 452 rmb(); 453 454 skb = priv->tx_skb[priv->tx_dirty_desc]; 455 priv->tx_skb[priv->tx_dirty_desc] = NULL; 456 dma_unmap_single(&priv->pdev->dev, desc->address, skb->len, 457 DMA_TO_DEVICE); 458 459 priv->tx_dirty_desc++; 460 if (priv->tx_dirty_desc == priv->tx_ring_size) 461 priv->tx_dirty_desc = 0; 462 priv->tx_desc_count++; 463 464 spin_unlock(&priv->tx_lock); 465 466 if (desc->len_stat & DMADESC_UNDER_MASK) 467 dev->stats.tx_errors++; 468 469 dev_kfree_skb(skb); 470 released++; 471 } 472 473 if (netif_queue_stopped(dev) && released) 474 netif_wake_queue(dev); 475 476 return released; 477 } 478 479 /* 480 * poll func, called by network core 481 */ 482 static int bcm_enet_poll(struct napi_struct *napi, int budget) 483 { 484 struct bcm_enet_priv *priv; 485 struct net_device *dev; 486 int rx_work_done; 487 488 priv = container_of(napi, struct bcm_enet_priv, napi); 489 dev = priv->net_dev; 490 491 /* ack interrupts */ 492 enet_dmac_writel(priv, priv->dma_chan_int_mask, 493 ENETDMAC_IR, priv->rx_chan); 494 enet_dmac_writel(priv, priv->dma_chan_int_mask, 495 ENETDMAC_IR, priv->tx_chan); 496 497 /* reclaim sent skb */ 498 bcm_enet_tx_reclaim(dev, 0); 499 500 spin_lock(&priv->rx_lock); 501 rx_work_done = bcm_enet_receive_queue(dev, budget); 502 spin_unlock(&priv->rx_lock); 503 504 if (rx_work_done >= budget) { 505 /* rx queue is not yet empty/clean */ 506 return rx_work_done; 507 } 508 509 /* no more packet in rx/tx queue, remove device from poll 510 * queue */ 511 napi_complete_done(napi, rx_work_done); 512 513 /* restore rx/tx interrupt */ 514 enet_dmac_writel(priv, priv->dma_chan_int_mask, 515 ENETDMAC_IRMASK, priv->rx_chan); 516 enet_dmac_writel(priv, priv->dma_chan_int_mask, 517 ENETDMAC_IRMASK, priv->tx_chan); 518 519 return rx_work_done; 520 } 521 522 /* 523 * mac interrupt handler 524 */ 525 static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id) 526 { 527 struct net_device *dev; 528 struct bcm_enet_priv *priv; 529 u32 stat; 530 531 dev = dev_id; 532 priv = netdev_priv(dev); 533 534 stat = enet_readl(priv, ENET_IR_REG); 
535 if (!(stat & ENET_IR_MIB)) 536 return IRQ_NONE; 537 538 /* clear & mask interrupt */ 539 enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); 540 enet_writel(priv, 0, ENET_IRMASK_REG); 541 542 /* read mib registers in workqueue */ 543 schedule_work(&priv->mib_update_task); 544 545 return IRQ_HANDLED; 546 } 547 548 /* 549 * rx/tx dma interrupt handler 550 */ 551 static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) 552 { 553 struct net_device *dev; 554 struct bcm_enet_priv *priv; 555 556 dev = dev_id; 557 priv = netdev_priv(dev); 558 559 /* mask rx/tx interrupts */ 560 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); 561 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); 562 563 napi_schedule(&priv->napi); 564 565 return IRQ_HANDLED; 566 } 567 568 /* 569 * tx request callback 570 */ 571 static netdev_tx_t 572 bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 573 { 574 struct bcm_enet_priv *priv; 575 struct bcm_enet_desc *desc; 576 u32 len_stat; 577 netdev_tx_t ret; 578 579 priv = netdev_priv(dev); 580 581 /* lock against tx reclaim */ 582 spin_lock(&priv->tx_lock); 583 584 /* make sure the tx hw queue is not full, should not happen 585 * since we stop queue before it's the case */ 586 if (unlikely(!priv->tx_desc_count)) { 587 netif_stop_queue(dev); 588 dev_err(&priv->pdev->dev, "xmit called with no tx desc " 589 "available?\n"); 590 ret = NETDEV_TX_BUSY; 591 goto out_unlock; 592 } 593 594 /* pad small packets sent on a switch device */ 595 if (priv->enet_is_sw && skb->len < 64) { 596 int needed = 64 - skb->len; 597 char *data; 598 599 if (unlikely(skb_tailroom(skb) < needed)) { 600 struct sk_buff *nskb; 601 602 nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC); 603 if (!nskb) { 604 ret = NETDEV_TX_BUSY; 605 goto out_unlock; 606 } 607 dev_kfree_skb(skb); 608 skb = nskb; 609 } 610 data = skb_put_zero(skb, needed); 611 } 612 613 /* point to the next available desc */ 614 desc = &priv->tx_desc_cpu[priv->tx_curr_desc]; 615 priv->tx_skb[priv->tx_curr_desc] = skb; 616 617 /* fill descriptor */ 618 desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len, 619 DMA_TO_DEVICE); 620 621 len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; 622 len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) | 623 DMADESC_APPEND_CRC | 624 DMADESC_OWNER_MASK; 625 626 priv->tx_curr_desc++; 627 if (priv->tx_curr_desc == priv->tx_ring_size) { 628 priv->tx_curr_desc = 0; 629 len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); 630 } 631 priv->tx_desc_count--; 632 633 /* dma might be already polling, make sure we update desc 634 * fields in correct order */ 635 wmb(); 636 desc->len_stat = len_stat; 637 wmb(); 638 639 /* kick tx dma */ 640 enet_dmac_writel(priv, priv->dma_chan_en_mask, 641 ENETDMAC_CHANCFG, priv->tx_chan); 642 643 /* stop queue if no more desc available */ 644 if (!priv->tx_desc_count) 645 netif_stop_queue(dev); 646 647 dev->stats.tx_bytes += skb->len; 648 dev->stats.tx_packets++; 649 ret = NETDEV_TX_OK; 650 651 out_unlock: 652 spin_unlock(&priv->tx_lock); 653 return ret; 654 } 655 656 /* 657 * Change the interface's mac address. 
658 */ 659 static int bcm_enet_set_mac_address(struct net_device *dev, void *p) 660 { 661 struct bcm_enet_priv *priv; 662 struct sockaddr *addr = p; 663 u32 val; 664 665 priv = netdev_priv(dev); 666 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 667 668 /* use perfect match register 0 to store my mac address */ 669 val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) | 670 (dev->dev_addr[4] << 8) | dev->dev_addr[5]; 671 enet_writel(priv, val, ENET_PML_REG(0)); 672 673 val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]); 674 val |= ENET_PMH_DATAVALID_MASK; 675 enet_writel(priv, val, ENET_PMH_REG(0)); 676 677 return 0; 678 } 679 680 /* 681 * Change rx mode (promiscuous/allmulti) and update multicast list 682 */ 683 static void bcm_enet_set_multicast_list(struct net_device *dev) 684 { 685 struct bcm_enet_priv *priv; 686 struct netdev_hw_addr *ha; 687 u32 val; 688 int i; 689 690 priv = netdev_priv(dev); 691 692 val = enet_readl(priv, ENET_RXCFG_REG); 693 694 if (dev->flags & IFF_PROMISC) 695 val |= ENET_RXCFG_PROMISC_MASK; 696 else 697 val &= ~ENET_RXCFG_PROMISC_MASK; 698 699 /* only 3 perfect match registers left, first one is used for 700 * own mac address */ 701 if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3) 702 val |= ENET_RXCFG_ALLMCAST_MASK; 703 else 704 val &= ~ENET_RXCFG_ALLMCAST_MASK; 705 706 /* no need to set perfect match registers if we catch all 707 * multicast */ 708 if (val & ENET_RXCFG_ALLMCAST_MASK) { 709 enet_writel(priv, val, ENET_RXCFG_REG); 710 return; 711 } 712 713 i = 0; 714 netdev_for_each_mc_addr(ha, dev) { 715 u8 *dmi_addr; 716 u32 tmp; 717 718 if (i == 3) 719 break; 720 /* update perfect match registers */ 721 dmi_addr = ha->addr; 722 tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) | 723 (dmi_addr[4] << 8) | dmi_addr[5]; 724 enet_writel(priv, tmp, ENET_PML_REG(i + 1)); 725 726 tmp = (dmi_addr[0] << 8 | dmi_addr[1]); 727 tmp |= ENET_PMH_DATAVALID_MASK; 728 enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1)); 729 } 730 731 for (; i < 3; i++) { 732 enet_writel(priv, 0, ENET_PML_REG(i + 1)); 733 enet_writel(priv, 0, ENET_PMH_REG(i + 1)); 734 } 735 736 enet_writel(priv, val, ENET_RXCFG_REG); 737 } 738 739 /* 740 * set mac duplex parameters 741 */ 742 static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex) 743 { 744 u32 val; 745 746 val = enet_readl(priv, ENET_TXCTL_REG); 747 if (fullduplex) 748 val |= ENET_TXCTL_FD_MASK; 749 else 750 val &= ~ENET_TXCTL_FD_MASK; 751 enet_writel(priv, val, ENET_TXCTL_REG); 752 } 753 754 /* 755 * set mac flow control parameters 756 */ 757 static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en) 758 { 759 u32 val; 760 761 /* rx flow control (pause frame handling) */ 762 val = enet_readl(priv, ENET_RXCFG_REG); 763 if (rx_en) 764 val |= ENET_RXCFG_ENFLOW_MASK; 765 else 766 val &= ~ENET_RXCFG_ENFLOW_MASK; 767 enet_writel(priv, val, ENET_RXCFG_REG); 768 769 if (!priv->dma_has_sram) 770 return; 771 772 /* tx flow control (pause frame generation) */ 773 val = enet_dma_readl(priv, ENETDMA_CFG_REG); 774 if (tx_en) 775 val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); 776 else 777 val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); 778 enet_dma_writel(priv, val, ENETDMA_CFG_REG); 779 } 780 781 /* 782 * link changed callback (from phylib) 783 */ 784 static void bcm_enet_adjust_phy_link(struct net_device *dev) 785 { 786 struct bcm_enet_priv *priv; 787 struct phy_device *phydev; 788 int status_changed; 789 790 priv = netdev_priv(dev); 791 phydev = dev->phydev; 792 status_changed = 0; 793 794 if 
(priv->old_link != phydev->link) { 795 status_changed = 1; 796 priv->old_link = phydev->link; 797 } 798 799 /* reflect duplex change in mac configuration */ 800 if (phydev->link && phydev->duplex != priv->old_duplex) { 801 bcm_enet_set_duplex(priv, 802 (phydev->duplex == DUPLEX_FULL) ? 1 : 0); 803 status_changed = 1; 804 priv->old_duplex = phydev->duplex; 805 } 806 807 /* enable flow control if remote advertise it (trust phylib to 808 * check that duplex is full */ 809 if (phydev->link && phydev->pause != priv->old_pause) { 810 int rx_pause_en, tx_pause_en; 811 812 if (phydev->pause) { 813 /* pause was advertised by lpa and us */ 814 rx_pause_en = 1; 815 tx_pause_en = 1; 816 } else if (!priv->pause_auto) { 817 /* pause setting overridden by user */ 818 rx_pause_en = priv->pause_rx; 819 tx_pause_en = priv->pause_tx; 820 } else { 821 rx_pause_en = 0; 822 tx_pause_en = 0; 823 } 824 825 bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en); 826 status_changed = 1; 827 priv->old_pause = phydev->pause; 828 } 829 830 if (status_changed) { 831 pr_info("%s: link %s", dev->name, phydev->link ? 832 "UP" : "DOWN"); 833 if (phydev->link) 834 pr_cont(" - %d/%s - flow control %s", phydev->speed, 835 DUPLEX_FULL == phydev->duplex ? "full" : "half", 836 phydev->pause == 1 ? "rx&tx" : "off"); 837 838 pr_cont("\n"); 839 } 840 } 841 842 /* 843 * link changed callback (if phylib is not used) 844 */ 845 static void bcm_enet_adjust_link(struct net_device *dev) 846 { 847 struct bcm_enet_priv *priv; 848 849 priv = netdev_priv(dev); 850 bcm_enet_set_duplex(priv, priv->force_duplex_full); 851 bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx); 852 netif_carrier_on(dev); 853 854 pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n", 855 dev->name, 856 priv->force_speed_100 ? 100 : 10, 857 priv->force_duplex_full ? "full" : "half", 858 priv->pause_rx ? "rx" : "off", 859 priv->pause_tx ? 
"tx" : "off"); 860 } 861 862 /* 863 * open callback, allocate dma rings & buffers and start rx operation 864 */ 865 static int bcm_enet_open(struct net_device *dev) 866 { 867 struct bcm_enet_priv *priv; 868 struct sockaddr addr; 869 struct device *kdev; 870 struct phy_device *phydev; 871 int i, ret; 872 unsigned int size; 873 char phy_id[MII_BUS_ID_SIZE + 3]; 874 void *p; 875 u32 val; 876 877 priv = netdev_priv(dev); 878 kdev = &priv->pdev->dev; 879 880 if (priv->has_phy) { 881 /* connect to PHY */ 882 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, 883 priv->mii_bus->id, priv->phy_id); 884 885 phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 886 PHY_INTERFACE_MODE_MII); 887 888 if (IS_ERR(phydev)) { 889 dev_err(kdev, "could not attach to PHY\n"); 890 return PTR_ERR(phydev); 891 } 892 893 /* mask with MAC supported features */ 894 phy_support_sym_pause(phydev); 895 phy_set_max_speed(phydev, SPEED_100); 896 phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx, 897 priv->pause_auto); 898 899 phy_attached_info(phydev); 900 901 priv->old_link = 0; 902 priv->old_duplex = -1; 903 priv->old_pause = -1; 904 } else { 905 phydev = NULL; 906 } 907 908 /* mask all interrupts and request them */ 909 enet_writel(priv, 0, ENET_IRMASK_REG); 910 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); 911 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); 912 913 ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); 914 if (ret) 915 goto out_phy_disconnect; 916 917 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0, 918 dev->name, dev); 919 if (ret) 920 goto out_freeirq; 921 922 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, 923 0, dev->name, dev); 924 if (ret) 925 goto out_freeirq_rx; 926 927 /* initialize perfect match registers */ 928 for (i = 0; i < 4; i++) { 929 enet_writel(priv, 0, ENET_PML_REG(i)); 930 enet_writel(priv, 0, ENET_PMH_REG(i)); 931 } 932 933 /* write device mac address */ 934 memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN); 935 bcm_enet_set_mac_address(dev, &addr); 936 937 /* allocate rx dma ring */ 938 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 939 p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 940 if (!p) { 941 ret = -ENOMEM; 942 goto out_freeirq_tx; 943 } 944 945 priv->rx_desc_alloc_size = size; 946 priv->rx_desc_cpu = p; 947 948 /* allocate tx dma ring */ 949 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 950 p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 951 if (!p) { 952 ret = -ENOMEM; 953 goto out_free_rx_ring; 954 } 955 956 priv->tx_desc_alloc_size = size; 957 priv->tx_desc_cpu = p; 958 959 priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *), 960 GFP_KERNEL); 961 if (!priv->tx_skb) { 962 ret = -ENOMEM; 963 goto out_free_tx_ring; 964 } 965 966 priv->tx_desc_count = priv->tx_ring_size; 967 priv->tx_dirty_desc = 0; 968 priv->tx_curr_desc = 0; 969 spin_lock_init(&priv->tx_lock); 970 971 /* init & fill rx ring with skbs */ 972 priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *), 973 GFP_KERNEL); 974 if (!priv->rx_skb) { 975 ret = -ENOMEM; 976 goto out_free_tx_skb; 977 } 978 979 priv->rx_desc_count = 0; 980 priv->rx_dirty_desc = 0; 981 priv->rx_curr_desc = 0; 982 983 /* initialize flow control buffer allocation */ 984 if (priv->dma_has_sram) 985 enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, 986 ENETDMA_BUFALLOC_REG(priv->rx_chan)); 987 else 988 enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, 989 ENETDMAC_BUFALLOC, priv->rx_chan); 
990 991 if (bcm_enet_refill_rx(dev)) { 992 dev_err(kdev, "cannot allocate rx skb queue\n"); 993 ret = -ENOMEM; 994 goto out; 995 } 996 997 /* write rx & tx ring addresses */ 998 if (priv->dma_has_sram) { 999 enet_dmas_writel(priv, priv->rx_desc_dma, 1000 ENETDMAS_RSTART_REG, priv->rx_chan); 1001 enet_dmas_writel(priv, priv->tx_desc_dma, 1002 ENETDMAS_RSTART_REG, priv->tx_chan); 1003 } else { 1004 enet_dmac_writel(priv, priv->rx_desc_dma, 1005 ENETDMAC_RSTART, priv->rx_chan); 1006 enet_dmac_writel(priv, priv->tx_desc_dma, 1007 ENETDMAC_RSTART, priv->tx_chan); 1008 } 1009 1010 /* clear remaining state ram for rx & tx channel */ 1011 if (priv->dma_has_sram) { 1012 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); 1013 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan); 1014 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); 1015 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); 1016 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); 1017 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); 1018 } else { 1019 enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan); 1020 enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan); 1021 } 1022 1023 /* set max rx/tx length */ 1024 enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); 1025 enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); 1026 1027 /* set dma maximum burst len */ 1028 enet_dmac_writel(priv, priv->dma_maxburst, 1029 ENETDMAC_MAXBURST, priv->rx_chan); 1030 enet_dmac_writel(priv, priv->dma_maxburst, 1031 ENETDMAC_MAXBURST, priv->tx_chan); 1032 1033 /* set correct transmit fifo watermark */ 1034 enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); 1035 1036 /* set flow control low/high threshold to 1/3 / 2/3 */ 1037 if (priv->dma_has_sram) { 1038 val = priv->rx_ring_size / 3; 1039 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); 1040 val = (priv->rx_ring_size * 2) / 3; 1041 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); 1042 } else { 1043 enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan); 1044 enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan); 1045 enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan); 1046 } 1047 1048 /* all set, enable mac and interrupts, start dma engine and 1049 * kick rx dma channel */ 1050 wmb(); 1051 val = enet_readl(priv, ENET_CTL_REG); 1052 val |= ENET_CTL_ENABLE_MASK; 1053 enet_writel(priv, val, ENET_CTL_REG); 1054 if (priv->dma_has_sram) 1055 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); 1056 enet_dmac_writel(priv, priv->dma_chan_en_mask, 1057 ENETDMAC_CHANCFG, priv->rx_chan); 1058 1059 /* watch "mib counters about to overflow" interrupt */ 1060 enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); 1061 enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); 1062 1063 /* watch "packet transferred" interrupt in rx and tx */ 1064 enet_dmac_writel(priv, priv->dma_chan_int_mask, 1065 ENETDMAC_IR, priv->rx_chan); 1066 enet_dmac_writel(priv, priv->dma_chan_int_mask, 1067 ENETDMAC_IR, priv->tx_chan); 1068 1069 /* make sure we enable napi before rx interrupt */ 1070 napi_enable(&priv->napi); 1071 1072 enet_dmac_writel(priv, priv->dma_chan_int_mask, 1073 ENETDMAC_IRMASK, priv->rx_chan); 1074 enet_dmac_writel(priv, priv->dma_chan_int_mask, 1075 ENETDMAC_IRMASK, priv->tx_chan); 1076 1077 if (phydev) 1078 phy_start(phydev); 1079 else 1080 bcm_enet_adjust_link(dev); 1081 1082 netif_start_queue(dev); 1083 return 0; 1084 1085 out: 1086 for (i = 0; i < priv->rx_ring_size; i++) { 1087 
struct bcm_enet_desc *desc; 1088 1089 if (!priv->rx_skb[i]) 1090 continue; 1091 1092 desc = &priv->rx_desc_cpu[i]; 1093 dma_unmap_single(kdev, desc->address, priv->rx_skb_size, 1094 DMA_FROM_DEVICE); 1095 kfree_skb(priv->rx_skb[i]); 1096 } 1097 kfree(priv->rx_skb); 1098 1099 out_free_tx_skb: 1100 kfree(priv->tx_skb); 1101 1102 out_free_tx_ring: 1103 dma_free_coherent(kdev, priv->tx_desc_alloc_size, 1104 priv->tx_desc_cpu, priv->tx_desc_dma); 1105 1106 out_free_rx_ring: 1107 dma_free_coherent(kdev, priv->rx_desc_alloc_size, 1108 priv->rx_desc_cpu, priv->rx_desc_dma); 1109 1110 out_freeirq_tx: 1111 free_irq(priv->irq_tx, dev); 1112 1113 out_freeirq_rx: 1114 free_irq(priv->irq_rx, dev); 1115 1116 out_freeirq: 1117 free_irq(dev->irq, dev); 1118 1119 out_phy_disconnect: 1120 if (phydev) 1121 phy_disconnect(phydev); 1122 1123 return ret; 1124 } 1125 1126 /* 1127 * disable mac 1128 */ 1129 static void bcm_enet_disable_mac(struct bcm_enet_priv *priv) 1130 { 1131 int limit; 1132 u32 val; 1133 1134 val = enet_readl(priv, ENET_CTL_REG); 1135 val |= ENET_CTL_DISABLE_MASK; 1136 enet_writel(priv, val, ENET_CTL_REG); 1137 1138 limit = 1000; 1139 do { 1140 u32 val; 1141 1142 val = enet_readl(priv, ENET_CTL_REG); 1143 if (!(val & ENET_CTL_DISABLE_MASK)) 1144 break; 1145 udelay(1); 1146 } while (limit--); 1147 } 1148 1149 /* 1150 * disable dma in given channel 1151 */ 1152 static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan) 1153 { 1154 int limit; 1155 1156 enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan); 1157 1158 limit = 1000; 1159 do { 1160 u32 val; 1161 1162 val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan); 1163 if (!(val & ENETDMAC_CHANCFG_EN_MASK)) 1164 break; 1165 udelay(1); 1166 } while (limit--); 1167 } 1168 1169 /* 1170 * stop callback 1171 */ 1172 static int bcm_enet_stop(struct net_device *dev) 1173 { 1174 struct bcm_enet_priv *priv; 1175 struct device *kdev; 1176 int i; 1177 1178 priv = netdev_priv(dev); 1179 kdev = &priv->pdev->dev; 1180 1181 netif_stop_queue(dev); 1182 napi_disable(&priv->napi); 1183 if (priv->has_phy) 1184 phy_stop(dev->phydev); 1185 del_timer_sync(&priv->rx_timeout); 1186 1187 /* mask all interrupts */ 1188 enet_writel(priv, 0, ENET_IRMASK_REG); 1189 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); 1190 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); 1191 1192 /* make sure no mib update is scheduled */ 1193 cancel_work_sync(&priv->mib_update_task); 1194 1195 /* disable dma & mac */ 1196 bcm_enet_disable_dma(priv, priv->tx_chan); 1197 bcm_enet_disable_dma(priv, priv->rx_chan); 1198 bcm_enet_disable_mac(priv); 1199 1200 /* force reclaim of all tx buffers */ 1201 bcm_enet_tx_reclaim(dev, 1); 1202 1203 /* free the rx skb ring */ 1204 for (i = 0; i < priv->rx_ring_size; i++) { 1205 struct bcm_enet_desc *desc; 1206 1207 if (!priv->rx_skb[i]) 1208 continue; 1209 1210 desc = &priv->rx_desc_cpu[i]; 1211 dma_unmap_single(kdev, desc->address, priv->rx_skb_size, 1212 DMA_FROM_DEVICE); 1213 kfree_skb(priv->rx_skb[i]); 1214 } 1215 1216 /* free remaining allocated memory */ 1217 kfree(priv->rx_skb); 1218 kfree(priv->tx_skb); 1219 dma_free_coherent(kdev, priv->rx_desc_alloc_size, 1220 priv->rx_desc_cpu, priv->rx_desc_dma); 1221 dma_free_coherent(kdev, priv->tx_desc_alloc_size, 1222 priv->tx_desc_cpu, priv->tx_desc_dma); 1223 free_irq(priv->irq_tx, dev); 1224 free_irq(priv->irq_rx, dev); 1225 free_irq(dev->irq, dev); 1226 1227 /* release phy */ 1228 if (priv->has_phy) 1229 phy_disconnect(dev->phydev); 1230 1231 return 0; 1232 } 1233 1234 /* 1235 * 
ethtool callbacks 1236 */ 1237 struct bcm_enet_stats { 1238 char stat_string[ETH_GSTRING_LEN]; 1239 int sizeof_stat; 1240 int stat_offset; 1241 int mib_reg; 1242 }; 1243 1244 #define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \ 1245 offsetof(struct bcm_enet_priv, m) 1246 #define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \ 1247 offsetof(struct net_device_stats, m) 1248 1249 static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = { 1250 { "rx_packets", DEV_STAT(rx_packets), -1 }, 1251 { "tx_packets", DEV_STAT(tx_packets), -1 }, 1252 { "rx_bytes", DEV_STAT(rx_bytes), -1 }, 1253 { "tx_bytes", DEV_STAT(tx_bytes), -1 }, 1254 { "rx_errors", DEV_STAT(rx_errors), -1 }, 1255 { "tx_errors", DEV_STAT(tx_errors), -1 }, 1256 { "rx_dropped", DEV_STAT(rx_dropped), -1 }, 1257 { "tx_dropped", DEV_STAT(tx_dropped), -1 }, 1258 1259 { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS}, 1260 { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS }, 1261 { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST }, 1262 { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT }, 1263 { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 }, 1264 { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 }, 1265 { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 }, 1266 { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 }, 1267 { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 }, 1268 { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX }, 1269 { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB }, 1270 { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR }, 1271 { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG }, 1272 { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP }, 1273 { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN }, 1274 { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND }, 1275 { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC }, 1276 { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN }, 1277 { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM }, 1278 { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE }, 1279 { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL }, 1280 1281 { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS }, 1282 { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS }, 1283 { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST }, 1284 { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT }, 1285 { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 }, 1286 { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 }, 1287 { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 }, 1288 { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 }, 1289 { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023}, 1290 { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX }, 1291 { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB }, 1292 { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR }, 1293 { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG }, 1294 { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN }, 1295 { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL }, 1296 { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL }, 1297 { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL }, 1298 { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL }, 1299 { "tx_late_collision", 
GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE }, 1300 { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF }, 1301 { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS }, 1302 { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE }, 1303 1304 }; 1305 1306 #define BCM_ENET_STATS_LEN ARRAY_SIZE(bcm_enet_gstrings_stats) 1307 1308 static const u32 unused_mib_regs[] = { 1309 ETH_MIB_TX_ALL_OCTETS, 1310 ETH_MIB_TX_ALL_PKTS, 1311 ETH_MIB_RX_ALL_OCTETS, 1312 ETH_MIB_RX_ALL_PKTS, 1313 }; 1314 1315 1316 static void bcm_enet_get_drvinfo(struct net_device *netdev, 1317 struct ethtool_drvinfo *drvinfo) 1318 { 1319 strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver)); 1320 strlcpy(drvinfo->version, bcm_enet_driver_version, 1321 sizeof(drvinfo->version)); 1322 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); 1323 strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info)); 1324 } 1325 1326 static int bcm_enet_get_sset_count(struct net_device *netdev, 1327 int string_set) 1328 { 1329 switch (string_set) { 1330 case ETH_SS_STATS: 1331 return BCM_ENET_STATS_LEN; 1332 default: 1333 return -EINVAL; 1334 } 1335 } 1336 1337 static void bcm_enet_get_strings(struct net_device *netdev, 1338 u32 stringset, u8 *data) 1339 { 1340 int i; 1341 1342 switch (stringset) { 1343 case ETH_SS_STATS: 1344 for (i = 0; i < BCM_ENET_STATS_LEN; i++) { 1345 memcpy(data + i * ETH_GSTRING_LEN, 1346 bcm_enet_gstrings_stats[i].stat_string, 1347 ETH_GSTRING_LEN); 1348 } 1349 break; 1350 } 1351 } 1352 1353 static void update_mib_counters(struct bcm_enet_priv *priv) 1354 { 1355 int i; 1356 1357 for (i = 0; i < BCM_ENET_STATS_LEN; i++) { 1358 const struct bcm_enet_stats *s; 1359 u32 val; 1360 char *p; 1361 1362 s = &bcm_enet_gstrings_stats[i]; 1363 if (s->mib_reg == -1) 1364 continue; 1365 1366 val = enet_readl(priv, ENET_MIB_REG(s->mib_reg)); 1367 p = (char *)priv + s->stat_offset; 1368 1369 if (s->sizeof_stat == sizeof(u64)) 1370 *(u64 *)p += val; 1371 else 1372 *(u32 *)p += val; 1373 } 1374 1375 /* also empty unused mib counters to make sure mib counter 1376 * overflow interrupt is cleared */ 1377 for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++) 1378 (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i])); 1379 } 1380 1381 static void bcm_enet_update_mib_counters_defer(struct work_struct *t) 1382 { 1383 struct bcm_enet_priv *priv; 1384 1385 priv = container_of(t, struct bcm_enet_priv, mib_update_task); 1386 mutex_lock(&priv->mib_update_lock); 1387 update_mib_counters(priv); 1388 mutex_unlock(&priv->mib_update_lock); 1389 1390 /* reenable mib interrupt */ 1391 if (netif_running(priv->net_dev)) 1392 enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); 1393 } 1394 1395 static void bcm_enet_get_ethtool_stats(struct net_device *netdev, 1396 struct ethtool_stats *stats, 1397 u64 *data) 1398 { 1399 struct bcm_enet_priv *priv; 1400 int i; 1401 1402 priv = netdev_priv(netdev); 1403 1404 mutex_lock(&priv->mib_update_lock); 1405 update_mib_counters(priv); 1406 1407 for (i = 0; i < BCM_ENET_STATS_LEN; i++) { 1408 const struct bcm_enet_stats *s; 1409 char *p; 1410 1411 s = &bcm_enet_gstrings_stats[i]; 1412 if (s->mib_reg == -1) 1413 p = (char *)&netdev->stats; 1414 else 1415 p = (char *)priv; 1416 p += s->stat_offset; 1417 data[i] = (s->sizeof_stat == sizeof(u64)) ? 
1418 *(u64 *)p : *(u32 *)p; 1419 } 1420 mutex_unlock(&priv->mib_update_lock); 1421 } 1422 1423 static int bcm_enet_nway_reset(struct net_device *dev) 1424 { 1425 struct bcm_enet_priv *priv; 1426 1427 priv = netdev_priv(dev); 1428 if (priv->has_phy) 1429 return phy_ethtool_nway_reset(dev); 1430 1431 return -EOPNOTSUPP; 1432 } 1433 1434 static int bcm_enet_get_link_ksettings(struct net_device *dev, 1435 struct ethtool_link_ksettings *cmd) 1436 { 1437 struct bcm_enet_priv *priv; 1438 u32 supported, advertising; 1439 1440 priv = netdev_priv(dev); 1441 1442 if (priv->has_phy) { 1443 if (!dev->phydev) 1444 return -ENODEV; 1445 1446 phy_ethtool_ksettings_get(dev->phydev, cmd); 1447 1448 return 0; 1449 } else { 1450 cmd->base.autoneg = 0; 1451 cmd->base.speed = (priv->force_speed_100) ? 1452 SPEED_100 : SPEED_10; 1453 cmd->base.duplex = (priv->force_duplex_full) ? 1454 DUPLEX_FULL : DUPLEX_HALF; 1455 supported = ADVERTISED_10baseT_Half | 1456 ADVERTISED_10baseT_Full | 1457 ADVERTISED_100baseT_Half | 1458 ADVERTISED_100baseT_Full; 1459 advertising = 0; 1460 ethtool_convert_legacy_u32_to_link_mode( 1461 cmd->link_modes.supported, supported); 1462 ethtool_convert_legacy_u32_to_link_mode( 1463 cmd->link_modes.advertising, advertising); 1464 cmd->base.port = PORT_MII; 1465 } 1466 return 0; 1467 } 1468 1469 static int bcm_enet_set_link_ksettings(struct net_device *dev, 1470 const struct ethtool_link_ksettings *cmd) 1471 { 1472 struct bcm_enet_priv *priv; 1473 1474 priv = netdev_priv(dev); 1475 if (priv->has_phy) { 1476 if (!dev->phydev) 1477 return -ENODEV; 1478 return phy_ethtool_ksettings_set(dev->phydev, cmd); 1479 } else { 1480 1481 if (cmd->base.autoneg || 1482 (cmd->base.speed != SPEED_100 && 1483 cmd->base.speed != SPEED_10) || 1484 cmd->base.port != PORT_MII) 1485 return -EINVAL; 1486 1487 priv->force_speed_100 = 1488 (cmd->base.speed == SPEED_100) ? 1 : 0; 1489 priv->force_duplex_full = 1490 (cmd->base.duplex == DUPLEX_FULL) ? 
1 : 0; 1491 1492 if (netif_running(dev)) 1493 bcm_enet_adjust_link(dev); 1494 return 0; 1495 } 1496 } 1497 1498 static void bcm_enet_get_ringparam(struct net_device *dev, 1499 struct ethtool_ringparam *ering) 1500 { 1501 struct bcm_enet_priv *priv; 1502 1503 priv = netdev_priv(dev); 1504 1505 /* rx/tx ring is actually only limited by memory */ 1506 ering->rx_max_pending = 8192; 1507 ering->tx_max_pending = 8192; 1508 ering->rx_pending = priv->rx_ring_size; 1509 ering->tx_pending = priv->tx_ring_size; 1510 } 1511 1512 static int bcm_enet_set_ringparam(struct net_device *dev, 1513 struct ethtool_ringparam *ering) 1514 { 1515 struct bcm_enet_priv *priv; 1516 int was_running; 1517 1518 priv = netdev_priv(dev); 1519 1520 was_running = 0; 1521 if (netif_running(dev)) { 1522 bcm_enet_stop(dev); 1523 was_running = 1; 1524 } 1525 1526 priv->rx_ring_size = ering->rx_pending; 1527 priv->tx_ring_size = ering->tx_pending; 1528 1529 if (was_running) { 1530 int err; 1531 1532 err = bcm_enet_open(dev); 1533 if (err) 1534 dev_close(dev); 1535 else 1536 bcm_enet_set_multicast_list(dev); 1537 } 1538 return 0; 1539 } 1540 1541 static void bcm_enet_get_pauseparam(struct net_device *dev, 1542 struct ethtool_pauseparam *ecmd) 1543 { 1544 struct bcm_enet_priv *priv; 1545 1546 priv = netdev_priv(dev); 1547 ecmd->autoneg = priv->pause_auto; 1548 ecmd->rx_pause = priv->pause_rx; 1549 ecmd->tx_pause = priv->pause_tx; 1550 } 1551 1552 static int bcm_enet_set_pauseparam(struct net_device *dev, 1553 struct ethtool_pauseparam *ecmd) 1554 { 1555 struct bcm_enet_priv *priv; 1556 1557 priv = netdev_priv(dev); 1558 1559 if (priv->has_phy) { 1560 if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) { 1561 /* asymetric pause mode not supported, 1562 * actually possible but integrated PHY has RO 1563 * asym_pause bit */ 1564 return -EINVAL; 1565 } 1566 } else { 1567 /* no pause autoneg on direct mii connection */ 1568 if (ecmd->autoneg) 1569 return -EINVAL; 1570 } 1571 1572 priv->pause_auto = ecmd->autoneg; 1573 priv->pause_rx = ecmd->rx_pause; 1574 priv->pause_tx = ecmd->tx_pause; 1575 1576 return 0; 1577 } 1578 1579 static const struct ethtool_ops bcm_enet_ethtool_ops = { 1580 .get_strings = bcm_enet_get_strings, 1581 .get_sset_count = bcm_enet_get_sset_count, 1582 .get_ethtool_stats = bcm_enet_get_ethtool_stats, 1583 .nway_reset = bcm_enet_nway_reset, 1584 .get_drvinfo = bcm_enet_get_drvinfo, 1585 .get_link = ethtool_op_get_link, 1586 .get_ringparam = bcm_enet_get_ringparam, 1587 .set_ringparam = bcm_enet_set_ringparam, 1588 .get_pauseparam = bcm_enet_get_pauseparam, 1589 .set_pauseparam = bcm_enet_set_pauseparam, 1590 .get_link_ksettings = bcm_enet_get_link_ksettings, 1591 .set_link_ksettings = bcm_enet_set_link_ksettings, 1592 }; 1593 1594 static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1595 { 1596 struct bcm_enet_priv *priv; 1597 1598 priv = netdev_priv(dev); 1599 if (priv->has_phy) { 1600 if (!dev->phydev) 1601 return -ENODEV; 1602 return phy_mii_ioctl(dev->phydev, rq, cmd); 1603 } else { 1604 struct mii_if_info mii; 1605 1606 mii.dev = dev; 1607 mii.mdio_read = bcm_enet_mdio_read_mii; 1608 mii.mdio_write = bcm_enet_mdio_write_mii; 1609 mii.phy_id = 0; 1610 mii.phy_id_mask = 0x3f; 1611 mii.reg_num_mask = 0x1f; 1612 return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL); 1613 } 1614 } 1615 1616 /* 1617 * adjust mtu, can't be called while device is running 1618 */ 1619 static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu) 1620 { 1621 struct bcm_enet_priv *priv = 
netdev_priv(dev); 1622 int actual_mtu = new_mtu; 1623 1624 if (netif_running(dev)) 1625 return -EBUSY; 1626 1627 /* add ethernet header + vlan tag size */ 1628 actual_mtu += VLAN_ETH_HLEN; 1629 1630 /* 1631 * setup maximum size before we get overflow mark in 1632 * descriptor, note that this will not prevent reception of 1633 * big frames, they will be split into multiple buffers 1634 * anyway 1635 */ 1636 priv->hw_mtu = actual_mtu; 1637 1638 /* 1639 * align rx buffer size to dma burst len, account FCS since 1640 * it's appended 1641 */ 1642 priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN, 1643 priv->dma_maxburst * 4); 1644 1645 dev->mtu = new_mtu; 1646 return 0; 1647 } 1648 1649 /* 1650 * preinit hardware to allow mii operation while device is down 1651 */ 1652 static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv) 1653 { 1654 u32 val; 1655 int limit; 1656 1657 /* make sure mac is disabled */ 1658 bcm_enet_disable_mac(priv); 1659 1660 /* soft reset mac */ 1661 val = ENET_CTL_SRESET_MASK; 1662 enet_writel(priv, val, ENET_CTL_REG); 1663 wmb(); 1664 1665 limit = 1000; 1666 do { 1667 val = enet_readl(priv, ENET_CTL_REG); 1668 if (!(val & ENET_CTL_SRESET_MASK)) 1669 break; 1670 udelay(1); 1671 } while (limit--); 1672 1673 /* select correct mii interface */ 1674 val = enet_readl(priv, ENET_CTL_REG); 1675 if (priv->use_external_mii) 1676 val |= ENET_CTL_EPHYSEL_MASK; 1677 else 1678 val &= ~ENET_CTL_EPHYSEL_MASK; 1679 enet_writel(priv, val, ENET_CTL_REG); 1680 1681 /* turn on mdc clock */ 1682 enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) | 1683 ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG); 1684 1685 /* set mib counters to self-clear when read */ 1686 val = enet_readl(priv, ENET_MIBCTL_REG); 1687 val |= ENET_MIBCTL_RDCLEAR_MASK; 1688 enet_writel(priv, val, ENET_MIBCTL_REG); 1689 } 1690 1691 static const struct net_device_ops bcm_enet_ops = { 1692 .ndo_open = bcm_enet_open, 1693 .ndo_stop = bcm_enet_stop, 1694 .ndo_start_xmit = bcm_enet_start_xmit, 1695 .ndo_set_mac_address = bcm_enet_set_mac_address, 1696 .ndo_set_rx_mode = bcm_enet_set_multicast_list, 1697 .ndo_do_ioctl = bcm_enet_ioctl, 1698 .ndo_change_mtu = bcm_enet_change_mtu, 1699 }; 1700 1701 /* 1702 * allocate netdevice, request register memory and register device. 
1703 */ 1704 static int bcm_enet_probe(struct platform_device *pdev) 1705 { 1706 struct bcm_enet_priv *priv; 1707 struct net_device *dev; 1708 struct bcm63xx_enet_platform_data *pd; 1709 struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; 1710 struct mii_bus *bus; 1711 int i, ret; 1712 1713 if (!bcm_enet_shared_base[0]) 1714 return -EPROBE_DEFER; 1715 1716 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1717 res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); 1718 res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2); 1719 if (!res_irq || !res_irq_rx || !res_irq_tx) 1720 return -ENODEV; 1721 1722 ret = 0; 1723 dev = alloc_etherdev(sizeof(*priv)); 1724 if (!dev) 1725 return -ENOMEM; 1726 priv = netdev_priv(dev); 1727 1728 priv->enet_is_sw = false; 1729 priv->dma_maxburst = BCMENET_DMA_MAXBURST; 1730 1731 ret = bcm_enet_change_mtu(dev, dev->mtu); 1732 if (ret) 1733 goto out; 1734 1735 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1736 priv->base = devm_ioremap_resource(&pdev->dev, res_mem); 1737 if (IS_ERR(priv->base)) { 1738 ret = PTR_ERR(priv->base); 1739 goto out; 1740 } 1741 1742 dev->irq = priv->irq = res_irq->start; 1743 priv->irq_rx = res_irq_rx->start; 1744 priv->irq_tx = res_irq_tx->start; 1745 1746 priv->mac_clk = devm_clk_get(&pdev->dev, "enet"); 1747 if (IS_ERR(priv->mac_clk)) { 1748 ret = PTR_ERR(priv->mac_clk); 1749 goto out; 1750 } 1751 ret = clk_prepare_enable(priv->mac_clk); 1752 if (ret) 1753 goto out; 1754 1755 /* initialize default and fetch platform data */ 1756 priv->rx_ring_size = BCMENET_DEF_RX_DESC; 1757 priv->tx_ring_size = BCMENET_DEF_TX_DESC; 1758 1759 pd = dev_get_platdata(&pdev->dev); 1760 if (pd) { 1761 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); 1762 priv->has_phy = pd->has_phy; 1763 priv->phy_id = pd->phy_id; 1764 priv->has_phy_interrupt = pd->has_phy_interrupt; 1765 priv->phy_interrupt = pd->phy_interrupt; 1766 priv->use_external_mii = !pd->use_internal_phy; 1767 priv->pause_auto = pd->pause_auto; 1768 priv->pause_rx = pd->pause_rx; 1769 priv->pause_tx = pd->pause_tx; 1770 priv->force_duplex_full = pd->force_duplex_full; 1771 priv->force_speed_100 = pd->force_speed_100; 1772 priv->dma_chan_en_mask = pd->dma_chan_en_mask; 1773 priv->dma_chan_int_mask = pd->dma_chan_int_mask; 1774 priv->dma_chan_width = pd->dma_chan_width; 1775 priv->dma_has_sram = pd->dma_has_sram; 1776 priv->dma_desc_shift = pd->dma_desc_shift; 1777 priv->rx_chan = pd->rx_chan; 1778 priv->tx_chan = pd->tx_chan; 1779 } 1780 1781 if (priv->has_phy && !priv->use_external_mii) { 1782 /* using internal PHY, enable clock */ 1783 priv->phy_clk = devm_clk_get(&pdev->dev, "ephy"); 1784 if (IS_ERR(priv->phy_clk)) { 1785 ret = PTR_ERR(priv->phy_clk); 1786 priv->phy_clk = NULL; 1787 goto out_disable_clk_mac; 1788 } 1789 ret = clk_prepare_enable(priv->phy_clk); 1790 if (ret) 1791 goto out_disable_clk_mac; 1792 } 1793 1794 /* do minimal hardware init to be able to probe mii bus */ 1795 bcm_enet_hw_preinit(priv); 1796 1797 /* MII bus registration */ 1798 if (priv->has_phy) { 1799 1800 priv->mii_bus = mdiobus_alloc(); 1801 if (!priv->mii_bus) { 1802 ret = -ENOMEM; 1803 goto out_uninit_hw; 1804 } 1805 1806 bus = priv->mii_bus; 1807 bus->name = "bcm63xx_enet MII bus"; 1808 bus->parent = &pdev->dev; 1809 bus->priv = priv; 1810 bus->read = bcm_enet_mdio_read_phylib; 1811 bus->write = bcm_enet_mdio_write_phylib; 1812 sprintf(bus->id, "%s-%d", pdev->name, pdev->id); 1813 1814 /* only probe bus where we think the PHY is, because 1815 * the mdio read operation 
return 0 instead of 0xffff 1816 * if a slave is not present on hw */ 1817 bus->phy_mask = ~(1 << priv->phy_id); 1818 1819 if (priv->has_phy_interrupt) 1820 bus->irq[priv->phy_id] = priv->phy_interrupt; 1821 1822 ret = mdiobus_register(bus); 1823 if (ret) { 1824 dev_err(&pdev->dev, "unable to register mdio bus\n"); 1825 goto out_free_mdio; 1826 } 1827 } else { 1828 1829 /* run platform code to initialize PHY device */ 1830 if (pd && pd->mii_config && 1831 pd->mii_config(dev, 1, bcm_enet_mdio_read_mii, 1832 bcm_enet_mdio_write_mii)) { 1833 dev_err(&pdev->dev, "unable to configure mdio bus\n"); 1834 goto out_uninit_hw; 1835 } 1836 } 1837 1838 spin_lock_init(&priv->rx_lock); 1839 1840 /* init rx timeout (used for oom) */ 1841 timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0); 1842 1843 /* init the mib update lock&work */ 1844 mutex_init(&priv->mib_update_lock); 1845 INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer); 1846 1847 /* zero mib counters */ 1848 for (i = 0; i < ENET_MIB_REG_COUNT; i++) 1849 enet_writel(priv, 0, ENET_MIB_REG(i)); 1850 1851 /* register netdevice */ 1852 dev->netdev_ops = &bcm_enet_ops; 1853 netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); 1854 1855 dev->ethtool_ops = &bcm_enet_ethtool_ops; 1856 /* MTU range: 46 - 2028 */ 1857 dev->min_mtu = ETH_ZLEN - ETH_HLEN; 1858 dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN; 1859 SET_NETDEV_DEV(dev, &pdev->dev); 1860 1861 ret = register_netdev(dev); 1862 if (ret) 1863 goto out_unregister_mdio; 1864 1865 netif_carrier_off(dev); 1866 platform_set_drvdata(pdev, dev); 1867 priv->pdev = pdev; 1868 priv->net_dev = dev; 1869 1870 return 0; 1871 1872 out_unregister_mdio: 1873 if (priv->mii_bus) 1874 mdiobus_unregister(priv->mii_bus); 1875 1876 out_free_mdio: 1877 if (priv->mii_bus) 1878 mdiobus_free(priv->mii_bus); 1879 1880 out_uninit_hw: 1881 /* turn off mdc clock */ 1882 enet_writel(priv, 0, ENET_MIISC_REG); 1883 clk_disable_unprepare(priv->phy_clk); 1884 1885 out_disable_clk_mac: 1886 clk_disable_unprepare(priv->mac_clk); 1887 out: 1888 free_netdev(dev); 1889 return ret; 1890 } 1891 1892 1893 /* 1894 * exit func, stops hardware and unregisters netdevice 1895 */ 1896 static int bcm_enet_remove(struct platform_device *pdev) 1897 { 1898 struct bcm_enet_priv *priv; 1899 struct net_device *dev; 1900 1901 /* stop netdevice */ 1902 dev = platform_get_drvdata(pdev); 1903 priv = netdev_priv(dev); 1904 unregister_netdev(dev); 1905 1906 /* turn off mdc clock */ 1907 enet_writel(priv, 0, ENET_MIISC_REG); 1908 1909 if (priv->has_phy) { 1910 mdiobus_unregister(priv->mii_bus); 1911 mdiobus_free(priv->mii_bus); 1912 } else { 1913 struct bcm63xx_enet_platform_data *pd; 1914 1915 pd = dev_get_platdata(&pdev->dev); 1916 if (pd && pd->mii_config) 1917 pd->mii_config(dev, 0, bcm_enet_mdio_read_mii, 1918 bcm_enet_mdio_write_mii); 1919 } 1920 1921 /* disable hw block clocks */ 1922 clk_disable_unprepare(priv->phy_clk); 1923 clk_disable_unprepare(priv->mac_clk); 1924 1925 free_netdev(dev); 1926 return 0; 1927 } 1928 1929 struct platform_driver bcm63xx_enet_driver = { 1930 .probe = bcm_enet_probe, 1931 .remove = bcm_enet_remove, 1932 .driver = { 1933 .name = "bcm63xx_enet", 1934 .owner = THIS_MODULE, 1935 }, 1936 }; 1937 1938 /* 1939 * switch mii access callbacks 1940 */ 1941 static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv, 1942 int ext, int phy_id, int location) 1943 { 1944 u32 reg; 1945 int ret; 1946 1947 spin_lock_bh(&priv->enetsw_mdio_lock); 1948 enetsw_writel(priv, 0, ENETSW_MDIOC_REG); 1949 1950 reg = 
	reg = ENETSW_MDIOC_RD_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
	ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
	return ret;
}

static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
				  int ext, int phy_id, int location,
				  uint16_t data)
{
	u32 reg;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_WR_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	reg |= data;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
}

static inline int bcm_enet_port_is_rgmii(int portid)
{
	return portid >= ENETSW_RGMII_PORT0;
}

/*
 * enet sw PHY polling
 */
static void swphy_poll_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
	unsigned int i;

	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		int val, j, up, advertise, lpa, speed, duplex, media;
		int external_phy = bcm_enet_port_is_rgmii(i);
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (port->bypass_link)
			continue;

		/* dummy read to clear */
		for (j = 0; j < 2; j++)
			val = bcmenet_sw_mdio_read(priv, external_phy,
						   port->phy_id, MII_BMSR);

		if (val == 0xffff)
			continue;

		up = (val & BMSR_LSTATUS) ? 1 : 0;
		if (!(up ^ priv->sw_port_link[i]))
			continue;

		priv->sw_port_link[i] = up;

		/* link changed */
		if (!up) {
			dev_info(&priv->pdev->dev, "link DOWN on %s\n",
				 port->name);
			enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
				      ENETSW_PORTOV_REG(i));
			enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
				      ENETSW_PTCTRL_TXDIS_MASK,
				      ENETSW_PTCTRL_REG(i));
			continue;
		}

		advertise = bcmenet_sw_mdio_read(priv, external_phy,
						 port->phy_id, MII_ADVERTISE);

		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
					   MII_LPA);

		/* figure out media and duplex from advertise and LPA values */
		media = mii_nway_result(lpa & advertise);
		duplex = (media & ADVERTISE_FULL) ? 1 : 0;

		if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
			speed = 100;
		else
			speed = 10;

		if (val & BMSR_ESTATEN) {
			advertise = bcmenet_sw_mdio_read(priv, external_phy,
						port->phy_id, MII_CTRL1000);

			lpa = bcmenet_sw_mdio_read(priv, external_phy,
						port->phy_id, MII_STAT1000);

			if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
			    && lpa & (LPA_1000FULL | LPA_1000HALF)) {
				speed = 1000;
				duplex = (lpa & LPA_1000FULL);
			}
		}

		dev_info(&priv->pdev->dev,
			 "link UP on %s, %dMbps, %s-duplex\n",
			 port->name, speed, duplex ? "full" : "half");

		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		if (speed == 1000)
			override |= ENETSW_IMPOV_1000_MASK;
		else if (speed == 100)
			override |= ENETSW_IMPOV_100_MASK;
		if (duplex)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	priv->swphy_poll.expires = jiffies + HZ;
	add_timer(&priv->swphy_poll);
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enetsw_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i, ret;
	unsigned int size;
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	/* mask all interrupts and request them */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  0, dev->name, dev);
	if (ret)
		goto out_freeirq;

	if (priv->irq_tx != -1) {
		ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
				  0, dev->name, dev);
		if (ret)
			goto out_freeirq_rx;
	}

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* disable all ports */
	for (i = 0; i < priv->num_ports; i++) {
		enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
			      ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
			      ENETSW_PTCTRL_TXDIS_MASK,
			      ENETSW_PTCTRL_REG(i));

		priv->sw_port_link[i] = 0;
	}

	/* reset mib */
	val = enetsw_readb(priv, ENETSW_GMCR_REG);
	val |= ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);
	val &= ~ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);

	/* force CPU port state */
	val = enetsw_readb(priv, ENETSW_IMPOV_REG);
	val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
	enetsw_writeb(priv, val, ENETSW_IMPOV_REG);

	/* enable switch forward engine */
	val = enetsw_readb(priv, ENETSW_SWMODE_REG);
	val |= ENETSW_SWMODE_FWD_EN_MASK;
	enetsw_writeb(priv, val, ENETSW_SWMODE_REG);

	/* enable jumbo on all ports */
	enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
	enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dmas_writel(priv, priv->rx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->rx_chan);
	enet_dmas_writel(priv, priv->tx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->tx_chan);

	/* clear remaining state ram for rx & tx channel */
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel
	 */
	wmb();
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->tx_chan);

	netif_carrier_on(dev);
	netif_start_queue(dev);

	/* apply override config for bypass_link ports here. */
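	/* ports flagged bypass_link have no PHY for the poll timer to read:
	 * their speed/duplex is forced from the platform data
	 * (force_speed / force_duplex_full) below
	 */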
	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (!port->bypass_link)
			continue;

		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		switch (port->force_speed) {
		case 1000:
			override |= ENETSW_IMPOV_1000_MASK;
			break;
		case 100:
			override |= ENETSW_IMPOV_100_MASK;
			break;
		case 10:
			break;
		default:
			pr_warn("invalid forced speed on port %s: assume 10\n",
				port->name);
			break;
		}

		if (port->force_duplex_full)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	/* start phy polling timer */
	timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
	mod_timer(&priv->swphy_poll, jiffies);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	return ret;
}

/* stop callback */
static int bcm_enetsw_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	del_timer_sync(&priv->swphy_poll);
	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);

	return 0;
}

/* try to sort out phy external status by walking the used_port field
 * in the bcm_enet_priv structure. in case the phy address is not
 * assigned to any physical port on the switch, assume it is external
 * (and yell at the user).
 */
static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
{
	int i;

	for (i = 0; i < priv->num_ports; ++i) {
		if (!priv->used_ports[i].used)
			continue;
		if (priv->used_ports[i].phy_id == phy_id)
			return bcm_enet_port_is_rgmii(i);
	}

	printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
		    phy_id);
	return 1;
}

/* can't use bcmenet_sw_mdio_read directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
				    int location)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	return bcmenet_sw_mdio_read(priv,
				    bcm_enetsw_phy_is_external(priv, phy_id),
				    phy_id, location);
}

/* can't use bcmenet_sw_mdio_write directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
				      int location,
				      int val)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
			      phy_id, location, val);
}

static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_if_info mii;

	mii.dev = dev;
	mii.mdio_read = bcm_enetsw_mii_mdio_read;
	mii.mdio_write = bcm_enetsw_mii_mdio_write;
	mii.phy_id = 0;
	mii.phy_id_mask = 0x3f;
	mii.reg_num_mask = 0x1f;
	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
}

static const struct net_device_ops bcm_enetsw_ops = {
	.ndo_open = bcm_enetsw_open,
	.ndo_stop = bcm_enetsw_stop,
	.ndo_start_xmit = bcm_enet_start_xmit,
	.ndo_change_mtu = bcm_enet_change_mtu,
	.ndo_do_ioctl = bcm_enetsw_ioctl,
};


static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets", DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
	{ "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
	{ "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
	{ "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
	  ETHSW_MIB_RX_1024_1522 },
	{ "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
	  ETHSW_MIB_RX_1523_2047 },
	{ "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
	  ETHSW_MIB_RX_2048_4095 },
	{ "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
	  ETHSW_MIB_RX_4096_8191 },
	{ "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
	  ETHSW_MIB_RX_8192_9728 },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
	{ "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
	{ "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
	{ "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
	{ "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },

};

#define BCM_ENETSW_STATS_LEN	\
	(sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats))

static void bcm_enetsw_get_strings(struct net_device *netdev,
				   u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enetsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static int bcm_enetsw_get_sset_count(struct net_device *netdev,
				     int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENETSW_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
				   struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
	strncpy(drvinfo->version, bcm_enet_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "bcm63xx", 32);
}

static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
					 struct ethtool_stats *stats,
					 u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 lo, hi;
		char *p;
		int reg;

		s = &bcm_enetsw_gstrings_stats[i];

		reg = s->mib_reg;
		if (reg == -1)
			continue;

		lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64)) {
			hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
			*(u64 *)p = ((u64)hi << 32 | lo);
		} else {
			*(u32 *)p = lo;
		}
	}

	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enetsw_gstrings_stats[i];

		if (s->mib_reg == -1)
			p = (char *)&netdev->stats + s->stat_offset;
		else
			p = (char *)priv + s->stat_offset;

		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
}
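
/* ring sizes below are user-tunable through ethtool -g/-G; a resize on a
 * running interface restarts it so the rings are reallocated with the new
 * size
 */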

static void bcm_enetsw_get_ringparam(struct net_device *dev,
				     struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enetsw_set_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enetsw_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enetsw_open(dev);
		if (err)
			dev_close(dev);
	}
	return 0;
}

static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
	.get_strings = bcm_enetsw_get_strings,
	.get_sset_count = bcm_enetsw_get_sset_count,
	.get_ethtool_stats = bcm_enetsw_get_ethtool_stats,
	.get_drvinfo = bcm_enetsw_get_drvinfo,
	.get_ringparam = bcm_enetsw_get_ringparam,
	.set_ringparam = bcm_enetsw_set_ringparam,
};

/* allocate netdevice, request register memory and register device. */
static int bcm_enetsw_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enetsw_platform_data *pd;
	struct resource *res_mem;
	int ret, irq_rx, irq_tx;

	if (!bcm_enet_shared_base[0])
		return -EPROBE_DEFER;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq_rx = platform_get_irq(pdev, 0);
	irq_tx = platform_get_irq(pdev, 1);
	if (!res_mem || irq_rx < 0)
		return -ENODEV;

	ret = 0;
	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(*priv));

	/* initialize default and fetch platform data */
	priv->enet_is_sw = true;
	priv->irq_rx = irq_rx;
	priv->irq_tx = irq_tx;
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		memcpy(priv->used_ports, pd->used_ports,
		       sizeof(pd->used_ports));
		priv->num_ports = pd->num_ports;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
	}

	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
		goto out;

	priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out;
	}

	priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out;

	priv->rx_chan = 0;
	priv->tx_chan = 1;
	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);

	/* register netdevice */
	dev->netdev_ops = &bcm_enetsw_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
	dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&priv->enetsw_mdio_lock);

	ret = register_netdev(dev);
	if (ret)
		goto out_disable_clk;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_disable_clk:
	clk_disable_unprepare(priv->mac_clk);
out:
	free_netdev(dev);
	return ret;
}


/* exit func, stops hardware and unregisters netdevice */
static int bcm_enetsw_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	clk_disable_unprepare(priv->mac_clk);

	free_netdev(dev);
	return 0;
}

struct platform_driver bcm63xx_enetsw_driver = {
	.probe = bcm_enetsw_probe,
	.remove = bcm_enetsw_remove,
	.driver = {
		.name = "bcm63xx_enetsw",
		.owner = THIS_MODULE,
	},
};

/* reserve & remap memory space shared between all macs */
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *p[3];
	unsigned int i;

	memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));

	for (i = 0; i < 3; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		p[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(p[i]))
			return PTR_ERR(p[i]);
	}

	memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));

	return 0;
}

static int bcm_enet_shared_remove(struct platform_device *pdev)
{
	return 0;
}

/* this "shared" driver is needed because both macs share a single
 * address space
 */
struct platform_driver bcm63xx_enet_shared_driver = {
	.probe = bcm_enet_shared_probe,
	.remove = bcm_enet_shared_remove,
	.driver = {
		.name = "bcm63xx_enet_shared",
		.owner = THIS_MODULE,
	},
};

static struct platform_driver * const drivers[] = {
	&bcm63xx_enet_shared_driver,
	&bcm63xx_enet_driver,
	&bcm63xx_enetsw_driver,
};

/* entry point */
static int __init bcm_enet_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit bcm_enet_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}


module_init(bcm_enet_init);
module_exit(bcm_enet_exit);

MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");
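
/*
 * Illustrative sketch (kept in a comment, not part of this driver): how a
 * board support file might describe the internal switch to bcm63xx_enetsw
 * through platform data. Only fields actually consumed above are shown;
 * the port layout, PHY ids and MAC address are invented for the example,
 * and bcm63xx_enetsw_register() is assumed to be the registration helper
 * provided by the arch code via bcm63xx_dev_enet.h.
 *
 *	static struct bcm63xx_enetsw_platform_data example_enetsw_pd = {
 *		.num_ports = 2,
 *		.used_ports = {
 *			[0] = {
 *				.used = 1,
 *				.phy_id = 1,
 *				.name = "port1",
 *			},
 *			[1] = {
 *				// fixed-link RGMII port: no PHY is polled,
 *				// speed/duplex comes from force_* fields
 *				.used = 1,
 *				.bypass_link = 1,
 *				.force_speed = 1000,
 *				.force_duplex_full = 1,
 *				.name = "rgmii0",
 *			},
 *		},
 *		.mac_addr = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *	};
 *
 *	// from board setup code:
 *	// bcm63xx_enetsw_register(&example_enetsw_pd);
 */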