/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include "macb.h"

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */

#define DEFAULT_RX_RING_SIZE	512	/* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512	/* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
				 | MACB_BIT(ISR_RLE)			\
				 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230

/* The DMA buffer descriptor may have a different size
 * depending on the hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}

static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
	return NULL;
}
#endif

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
		 macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
	index = macb_rx_ring_wrap(bp, index);
	index = macb_adj_dma_desc_idx(bp, index);
	return &bp->rx_ring[index];
}

static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
	return bp->rx_buffers + bp->rx_buffer_size *
	       macb_rx_ring_wrap(bp, index);
}

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}
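/* Note on the ring/descriptor accessors above (illustrative, not part of the
 * driver logic): a logical ring index is first wrapped with the power-of-two
 * ring mask and then scaled by macb_adj_dma_desc_idx() so that it points at
 * the first basic descriptor slot of the (possibly extended) entry.  Assuming
 * the extended descriptor structs are each the same size as the basic
 * struct macb_dma_desc, which is what the scaling factors imply, a queue with
 * HW_DMA_CAP_64B_PTP maps logical slot 2 onto &tx_ring[6]:
 *
 *	desc = macb_tx_desc(queue, 2);
 *	 == &queue->tx_ring[macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, 2))]
 *	 == &queue->tx_ring[2 * 3]
 */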
/* Find the CPU endianness by using the loopback bit of NCR register. When the
 * CPU is in big endian we need to program swapped mode for management
 * descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}

static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}

static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}

/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk:	Pointer to the clock to change
 * @speed:	New link speed, used to select the target clock rate
 * @dev:	Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}

static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	int phy_irq;
	int ret;

	if (bp->phy_node) {
		phydev = of_phy_connect(dev, bp->phy_node,
					&macb_handle_link_change, 0,
					bp->phy_interface);
		if (!phydev)
			return -ENODEV;
	} else {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		pdata = dev_get_platdata(&bp->pdev->dev);
		if (pdata) {
			if (gpio_is_valid(pdata->phy_irq_pin)) {
				ret = devm_gpio_request(&bp->pdev->dev,
							pdata->phy_irq_pin, "phy int");
				if (!ret) {
					phy_irq = gpio_to_irq(pdata->phy_irq_pin);
					phydev->irq = (phy_irq < 0) ?
PHY_POLL : phy_irq; 499 } 500 } else { 501 phydev->irq = PHY_POLL; 502 } 503 } 504 505 /* attach the mac to the phy */ 506 ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 507 bp->phy_interface); 508 if (ret) { 509 netdev_err(dev, "Could not attach to PHY\n"); 510 return ret; 511 } 512 } 513 514 /* mask with MAC supported features */ 515 if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) 516 phydev->supported &= PHY_GBIT_FEATURES; 517 else 518 phydev->supported &= PHY_BASIC_FEATURES; 519 520 if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) 521 phydev->supported &= ~SUPPORTED_1000baseT_Half; 522 523 phydev->advertising = phydev->supported; 524 525 bp->link = 0; 526 bp->speed = 0; 527 bp->duplex = -1; 528 529 return 0; 530 } 531 532 static int macb_mii_init(struct macb *bp) 533 { 534 struct macb_platform_data *pdata; 535 struct device_node *np; 536 int err = -ENXIO, i; 537 538 /* Enable management port */ 539 macb_writel(bp, NCR, MACB_BIT(MPE)); 540 541 bp->mii_bus = mdiobus_alloc(); 542 if (!bp->mii_bus) { 543 err = -ENOMEM; 544 goto err_out; 545 } 546 547 bp->mii_bus->name = "MACB_mii_bus"; 548 bp->mii_bus->read = &macb_mdio_read; 549 bp->mii_bus->write = &macb_mdio_write; 550 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 551 bp->pdev->name, bp->pdev->id); 552 bp->mii_bus->priv = bp; 553 bp->mii_bus->parent = &bp->pdev->dev; 554 pdata = dev_get_platdata(&bp->pdev->dev); 555 556 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); 557 558 np = bp->pdev->dev.of_node; 559 if (np) { 560 if (of_phy_is_fixed_link(np)) { 561 if (of_phy_register_fixed_link(np) < 0) { 562 dev_err(&bp->pdev->dev, 563 "broken fixed-link specification\n"); 564 goto err_out_unregister_bus; 565 } 566 bp->phy_node = of_node_get(np); 567 568 err = mdiobus_register(bp->mii_bus); 569 } else { 570 /* try dt phy registration */ 571 err = of_mdiobus_register(bp->mii_bus, np); 572 573 /* fallback to standard phy registration if no phy were 574 * found during dt phy registration 575 */ 576 if (!err && !phy_find_first(bp->mii_bus)) { 577 for (i = 0; i < PHY_MAX_ADDR; i++) { 578 struct phy_device *phydev; 579 580 phydev = mdiobus_scan(bp->mii_bus, i); 581 if (IS_ERR(phydev) && 582 PTR_ERR(phydev) != -ENODEV) { 583 err = PTR_ERR(phydev); 584 break; 585 } 586 } 587 588 if (err) 589 goto err_out_unregister_bus; 590 } 591 } 592 } else { 593 for (i = 0; i < PHY_MAX_ADDR; i++) 594 bp->mii_bus->irq[i] = PHY_POLL; 595 596 if (pdata) 597 bp->mii_bus->phy_mask = pdata->phy_mask; 598 599 err = mdiobus_register(bp->mii_bus); 600 } 601 602 if (err) 603 goto err_out_free_mdiobus; 604 605 err = macb_mii_probe(bp->dev); 606 if (err) 607 goto err_out_unregister_bus; 608 609 return 0; 610 611 err_out_unregister_bus: 612 mdiobus_unregister(bp->mii_bus); 613 err_out_free_mdiobus: 614 of_node_put(bp->phy_node); 615 if (np && of_phy_is_fixed_link(np)) 616 of_phy_deregister_fixed_link(np); 617 mdiobus_free(bp->mii_bus); 618 err_out: 619 return err; 620 } 621 622 static void macb_update_stats(struct macb *bp) 623 { 624 u32 *p = &bp->hw_stats.macb.rx_pause_frames; 625 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; 626 int offset = MACB_PFR; 627 628 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 629 630 for (; p < end; p++, offset += 4) 631 *p += bp->macb_reg_readl(bp, offset); 632 } 633 634 static int macb_halt_tx(struct macb *bp) 635 { 636 unsigned long halt_time, timeout; 637 u32 status; 638 639 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); 640 641 timeout = jiffies + 
usecs_to_jiffies(MACB_HALT_TIMEOUT); 642 do { 643 halt_time = jiffies; 644 status = macb_readl(bp, TSR); 645 if (!(status & MACB_BIT(TGO))) 646 return 0; 647 648 usleep_range(10, 250); 649 } while (time_before(halt_time, timeout)); 650 651 return -ETIMEDOUT; 652 } 653 654 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) 655 { 656 if (tx_skb->mapping) { 657 if (tx_skb->mapped_as_page) 658 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, 659 tx_skb->size, DMA_TO_DEVICE); 660 else 661 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, 662 tx_skb->size, DMA_TO_DEVICE); 663 tx_skb->mapping = 0; 664 } 665 666 if (tx_skb->skb) { 667 dev_kfree_skb_any(tx_skb->skb); 668 tx_skb->skb = NULL; 669 } 670 } 671 672 static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) 673 { 674 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 675 struct macb_dma_desc_64 *desc_64; 676 677 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 678 desc_64 = macb_64b_desc(bp, desc); 679 desc_64->addrh = upper_32_bits(addr); 680 } 681 #endif 682 desc->addr = lower_32_bits(addr); 683 } 684 685 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) 686 { 687 dma_addr_t addr = 0; 688 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 689 struct macb_dma_desc_64 *desc_64; 690 691 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 692 desc_64 = macb_64b_desc(bp, desc); 693 addr = ((u64)(desc_64->addrh) << 32); 694 } 695 #endif 696 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 697 return addr; 698 } 699 700 static void macb_tx_error_task(struct work_struct *work) 701 { 702 struct macb_queue *queue = container_of(work, struct macb_queue, 703 tx_error_task); 704 struct macb *bp = queue->bp; 705 struct macb_tx_skb *tx_skb; 706 struct macb_dma_desc *desc; 707 struct sk_buff *skb; 708 unsigned int tail; 709 unsigned long flags; 710 711 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", 712 (unsigned int)(queue - bp->queues), 713 queue->tx_tail, queue->tx_head); 714 715 /* Prevent the queue IRQ handlers from running: each of them may call 716 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue(). 717 * As explained below, we have to halt the transmission before updating 718 * TBQP registers so we call netif_tx_stop_all_queues() to notify the 719 * network engine about the macb/gem being halted. 720 */ 721 spin_lock_irqsave(&bp->lock, flags); 722 723 /* Make sure nobody is trying to queue up new packets */ 724 netif_tx_stop_all_queues(bp->dev); 725 726 /* Stop transmission now 727 * (in case we have just queued new packets) 728 * macb/gem must be halted to write TBQP register 729 */ 730 if (macb_halt_tx(bp)) 731 /* Just complain for now, reinitializing TX path can be good */ 732 netdev_err(bp->dev, "BUG: halt tx timed out\n"); 733 734 /* Treat frames in TX queue including the ones that caused the error. 735 * Free transmit buffers in upper layer. 
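 *
 * Illustrative walk-through (an assumed error case, not taken from the
 * hardware manual): suppose the frame at tx_tail completed (TX_USED was
 * written back to its first descriptor) and the following frame was aborted
 * by a retry-limit error. The loop below then frees the completed frame's
 * buffers and counts it in the stats, force-sets TX_USED on the aborted
 * frame's descriptors and frees its skb without counting it, and the ring is
 * reinitialised (TBQP rewritten, tx_head = tx_tail = 0) before TX restarts.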
736 */ 737 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { 738 u32 ctrl; 739 740 desc = macb_tx_desc(queue, tail); 741 ctrl = desc->ctrl; 742 tx_skb = macb_tx_skb(queue, tail); 743 skb = tx_skb->skb; 744 745 if (ctrl & MACB_BIT(TX_USED)) { 746 /* skb is set for the last buffer of the frame */ 747 while (!skb) { 748 macb_tx_unmap(bp, tx_skb); 749 tail++; 750 tx_skb = macb_tx_skb(queue, tail); 751 skb = tx_skb->skb; 752 } 753 754 /* ctrl still refers to the first buffer descriptor 755 * since it's the only one written back by the hardware 756 */ 757 if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) { 758 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", 759 macb_tx_ring_wrap(bp, tail), 760 skb->data); 761 bp->dev->stats.tx_packets++; 762 bp->dev->stats.tx_bytes += skb->len; 763 } 764 } else { 765 /* "Buffers exhausted mid-frame" errors may only happen 766 * if the driver is buggy, so complain loudly about 767 * those. Statistics are updated by hardware. 768 */ 769 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) 770 netdev_err(bp->dev, 771 "BUG: TX buffers exhausted mid-frame\n"); 772 773 desc->ctrl = ctrl | MACB_BIT(TX_USED); 774 } 775 776 macb_tx_unmap(bp, tx_skb); 777 } 778 779 /* Set end of TX queue */ 780 desc = macb_tx_desc(queue, 0); 781 macb_set_addr(bp, desc, 0); 782 desc->ctrl = MACB_BIT(TX_USED); 783 784 /* Make descriptor updates visible to hardware */ 785 wmb(); 786 787 /* Reinitialize the TX desc queue */ 788 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); 789 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 790 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 791 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); 792 #endif 793 /* Make TX ring reflect state of hardware */ 794 queue->tx_head = 0; 795 queue->tx_tail = 0; 796 797 /* Housework before enabling TX IRQ */ 798 macb_writel(bp, TSR, macb_readl(bp, TSR)); 799 queue_writel(queue, IER, MACB_TX_INT_FLAGS); 800 801 /* Now we are ready to start transmission again */ 802 netif_tx_start_all_queues(bp->dev); 803 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 804 805 spin_unlock_irqrestore(&bp->lock, flags); 806 } 807 808 static void macb_tx_interrupt(struct macb_queue *queue) 809 { 810 unsigned int tail; 811 unsigned int head; 812 u32 status; 813 struct macb *bp = queue->bp; 814 u16 queue_index = queue - bp->queues; 815 816 status = macb_readl(bp, TSR); 817 macb_writel(bp, TSR, status); 818 819 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 820 queue_writel(queue, ISR, MACB_BIT(TCOMP)); 821 822 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", 823 (unsigned long)status); 824 825 head = queue->tx_head; 826 for (tail = queue->tx_tail; tail != head; tail++) { 827 struct macb_tx_skb *tx_skb; 828 struct sk_buff *skb; 829 struct macb_dma_desc *desc; 830 u32 ctrl; 831 832 desc = macb_tx_desc(queue, tail); 833 834 /* Make hw descriptor updates visible to CPU */ 835 rmb(); 836 837 ctrl = desc->ctrl; 838 839 /* TX_USED bit is only set by hardware on the very first buffer 840 * descriptor of the transmitted frame. 
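 *
 * For example (illustrative), a frame that was mapped into three buffer
 * descriptors completes like this: the controller writes TX_USED back into
 * desc[tail] only, while the skb pointer was saved by macb_tx_map() in the
 * tx_skb entry of the *last* buffer. The inner loop below therefore walks
 * all three entries, unmapping each, and stops once it has freed the entry
 * carrying the skb, so the stats are updated exactly once per frame.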
841 */ 842 if (!(ctrl & MACB_BIT(TX_USED))) 843 break; 844 845 /* Process all buffers of the current transmitted frame */ 846 for (;; tail++) { 847 tx_skb = macb_tx_skb(queue, tail); 848 skb = tx_skb->skb; 849 850 /* First, update TX stats if needed */ 851 if (skb) { 852 if (gem_ptp_do_txstamp(queue, skb, desc) == 0) { 853 /* skb now belongs to timestamp buffer 854 * and will be removed later 855 */ 856 tx_skb->skb = NULL; 857 } 858 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", 859 macb_tx_ring_wrap(bp, tail), 860 skb->data); 861 bp->dev->stats.tx_packets++; 862 bp->dev->stats.tx_bytes += skb->len; 863 } 864 865 /* Now we can safely release resources */ 866 macb_tx_unmap(bp, tx_skb); 867 868 /* skb is set only for the last buffer of the frame. 869 * WARNING: at this point skb has been freed by 870 * macb_tx_unmap(). 871 */ 872 if (skb) 873 break; 874 } 875 } 876 877 queue->tx_tail = tail; 878 if (__netif_subqueue_stopped(bp->dev, queue_index) && 879 CIRC_CNT(queue->tx_head, queue->tx_tail, 880 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) 881 netif_wake_subqueue(bp->dev, queue_index); 882 } 883 884 static void gem_rx_refill(struct macb *bp) 885 { 886 unsigned int entry; 887 struct sk_buff *skb; 888 dma_addr_t paddr; 889 struct macb_dma_desc *desc; 890 891 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, 892 bp->rx_ring_size) > 0) { 893 entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head); 894 895 /* Make hw descriptor updates visible to CPU */ 896 rmb(); 897 898 bp->rx_prepared_head++; 899 desc = macb_rx_desc(bp, entry); 900 901 if (!bp->rx_skbuff[entry]) { 902 /* allocate sk_buff for this free entry in ring */ 903 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); 904 if (unlikely(!skb)) { 905 netdev_err(bp->dev, 906 "Unable to allocate sk_buff\n"); 907 break; 908 } 909 910 /* now fill corresponding descriptor entry */ 911 paddr = dma_map_single(&bp->pdev->dev, skb->data, 912 bp->rx_buffer_size, 913 DMA_FROM_DEVICE); 914 if (dma_mapping_error(&bp->pdev->dev, paddr)) { 915 dev_kfree_skb(skb); 916 break; 917 } 918 919 bp->rx_skbuff[entry] = skb; 920 921 if (entry == bp->rx_ring_size - 1) 922 paddr |= MACB_BIT(RX_WRAP); 923 macb_set_addr(bp, desc, paddr); 924 desc->ctrl = 0; 925 926 /* properly align Ethernet header */ 927 skb_reserve(skb, NET_IP_ALIGN); 928 } else { 929 desc->addr &= ~MACB_BIT(RX_USED); 930 desc->ctrl = 0; 931 } 932 } 933 934 /* Make descriptor updates visible to hardware */ 935 wmb(); 936 937 netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n", 938 bp->rx_prepared_head, bp->rx_tail); 939 } 940 941 /* Mark DMA descriptors from begin up to and not including end as unused */ 942 static void discard_partial_frame(struct macb *bp, unsigned int begin, 943 unsigned int end) 944 { 945 unsigned int frag; 946 947 for (frag = begin; frag != end; frag++) { 948 struct macb_dma_desc *desc = macb_rx_desc(bp, frag); 949 950 desc->addr &= ~MACB_BIT(RX_USED); 951 } 952 953 /* Make descriptor updates visible to hardware */ 954 wmb(); 955 956 /* When this happens, the hardware stats registers for 957 * whatever caused this is updated, so we don't have to record 958 * anything. 
959 */ 960 } 961 962 static int gem_rx(struct macb *bp, int budget) 963 { 964 unsigned int len; 965 unsigned int entry; 966 struct sk_buff *skb; 967 struct macb_dma_desc *desc; 968 int count = 0; 969 970 while (count < budget) { 971 u32 ctrl; 972 dma_addr_t addr; 973 bool rxused; 974 975 entry = macb_rx_ring_wrap(bp, bp->rx_tail); 976 desc = macb_rx_desc(bp, entry); 977 978 /* Make hw descriptor updates visible to CPU */ 979 rmb(); 980 981 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; 982 addr = macb_get_addr(bp, desc); 983 ctrl = desc->ctrl; 984 985 if (!rxused) 986 break; 987 988 bp->rx_tail++; 989 count++; 990 991 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { 992 netdev_err(bp->dev, 993 "not whole frame pointed by descriptor\n"); 994 bp->dev->stats.rx_dropped++; 995 break; 996 } 997 skb = bp->rx_skbuff[entry]; 998 if (unlikely(!skb)) { 999 netdev_err(bp->dev, 1000 "inconsistent Rx descriptor chain\n"); 1001 bp->dev->stats.rx_dropped++; 1002 break; 1003 } 1004 /* now everything is ready for receiving packet */ 1005 bp->rx_skbuff[entry] = NULL; 1006 len = ctrl & bp->rx_frm_len_mask; 1007 1008 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); 1009 1010 skb_put(skb, len); 1011 dma_unmap_single(&bp->pdev->dev, addr, 1012 bp->rx_buffer_size, DMA_FROM_DEVICE); 1013 1014 skb->protocol = eth_type_trans(skb, bp->dev); 1015 skb_checksum_none_assert(skb); 1016 if (bp->dev->features & NETIF_F_RXCSUM && 1017 !(bp->dev->flags & IFF_PROMISC) && 1018 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK) 1019 skb->ip_summed = CHECKSUM_UNNECESSARY; 1020 1021 bp->dev->stats.rx_packets++; 1022 bp->dev->stats.rx_bytes += skb->len; 1023 1024 gem_ptp_do_rxstamp(bp, skb, desc); 1025 1026 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 1027 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 1028 skb->len, skb->csum); 1029 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, 1030 skb_mac_header(skb), 16, true); 1031 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, 1032 skb->data, 32, true); 1033 #endif 1034 1035 netif_receive_skb(skb); 1036 } 1037 1038 gem_rx_refill(bp); 1039 1040 return count; 1041 } 1042 1043 static int macb_rx_frame(struct macb *bp, unsigned int first_frag, 1044 unsigned int last_frag) 1045 { 1046 unsigned int len; 1047 unsigned int frag; 1048 unsigned int offset; 1049 struct sk_buff *skb; 1050 struct macb_dma_desc *desc; 1051 1052 desc = macb_rx_desc(bp, last_frag); 1053 len = desc->ctrl & bp->rx_frm_len_mask; 1054 1055 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", 1056 macb_rx_ring_wrap(bp, first_frag), 1057 macb_rx_ring_wrap(bp, last_frag), len); 1058 1059 /* The ethernet header starts NET_IP_ALIGN bytes into the 1060 * first buffer. Since the header is 14 bytes, this makes the 1061 * payload word-aligned. 1062 * 1063 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy 1064 * the two padding bytes into the skb so that we avoid hitting 1065 * the slowpath in memcpy(), and pull them off afterwards. 
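 *
 * Concretely, with the usual NET_IP_ALIGN of 2: the controller already
 * stores each frame 2 bytes into its receive buffer (RBOF is programmed in
 * macb_init_hw() below), so copying the buffer verbatim puts the 14-byte
 * Ethernet header at skb offset 2 and the IP header at offset 16, a 4-byte
 * boundary. The __skb_pull(skb, NET_IP_ALIGN) afterwards drops the two
 * padding bytes again before the skb is handed to the stack.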
1066 */ 1067 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); 1068 if (!skb) { 1069 bp->dev->stats.rx_dropped++; 1070 for (frag = first_frag; ; frag++) { 1071 desc = macb_rx_desc(bp, frag); 1072 desc->addr &= ~MACB_BIT(RX_USED); 1073 if (frag == last_frag) 1074 break; 1075 } 1076 1077 /* Make descriptor updates visible to hardware */ 1078 wmb(); 1079 1080 return 1; 1081 } 1082 1083 offset = 0; 1084 len += NET_IP_ALIGN; 1085 skb_checksum_none_assert(skb); 1086 skb_put(skb, len); 1087 1088 for (frag = first_frag; ; frag++) { 1089 unsigned int frag_len = bp->rx_buffer_size; 1090 1091 if (offset + frag_len > len) { 1092 if (unlikely(frag != last_frag)) { 1093 dev_kfree_skb_any(skb); 1094 return -1; 1095 } 1096 frag_len = len - offset; 1097 } 1098 skb_copy_to_linear_data_offset(skb, offset, 1099 macb_rx_buffer(bp, frag), 1100 frag_len); 1101 offset += bp->rx_buffer_size; 1102 desc = macb_rx_desc(bp, frag); 1103 desc->addr &= ~MACB_BIT(RX_USED); 1104 1105 if (frag == last_frag) 1106 break; 1107 } 1108 1109 /* Make descriptor updates visible to hardware */ 1110 wmb(); 1111 1112 __skb_pull(skb, NET_IP_ALIGN); 1113 skb->protocol = eth_type_trans(skb, bp->dev); 1114 1115 bp->dev->stats.rx_packets++; 1116 bp->dev->stats.rx_bytes += skb->len; 1117 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 1118 skb->len, skb->csum); 1119 netif_receive_skb(skb); 1120 1121 return 0; 1122 } 1123 1124 static inline void macb_init_rx_ring(struct macb *bp) 1125 { 1126 dma_addr_t addr; 1127 struct macb_dma_desc *desc = NULL; 1128 int i; 1129 1130 addr = bp->rx_buffers_dma; 1131 for (i = 0; i < bp->rx_ring_size; i++) { 1132 desc = macb_rx_desc(bp, i); 1133 macb_set_addr(bp, desc, addr); 1134 desc->ctrl = 0; 1135 addr += bp->rx_buffer_size; 1136 } 1137 desc->addr |= MACB_BIT(RX_WRAP); 1138 bp->rx_tail = 0; 1139 } 1140 1141 static int macb_rx(struct macb *bp, int budget) 1142 { 1143 bool reset_rx_queue = false; 1144 int received = 0; 1145 unsigned int tail; 1146 int first_frag = -1; 1147 1148 for (tail = bp->rx_tail; budget > 0; tail++) { 1149 struct macb_dma_desc *desc = macb_rx_desc(bp, tail); 1150 u32 ctrl; 1151 1152 /* Make hw descriptor updates visible to CPU */ 1153 rmb(); 1154 1155 ctrl = desc->ctrl; 1156 1157 if (!(desc->addr & MACB_BIT(RX_USED))) 1158 break; 1159 1160 if (ctrl & MACB_BIT(RX_SOF)) { 1161 if (first_frag != -1) 1162 discard_partial_frame(bp, first_frag, tail); 1163 first_frag = tail; 1164 } 1165 1166 if (ctrl & MACB_BIT(RX_EOF)) { 1167 int dropped; 1168 1169 if (unlikely(first_frag == -1)) { 1170 reset_rx_queue = true; 1171 continue; 1172 } 1173 1174 dropped = macb_rx_frame(bp, first_frag, tail); 1175 first_frag = -1; 1176 if (unlikely(dropped < 0)) { 1177 reset_rx_queue = true; 1178 continue; 1179 } 1180 if (!dropped) { 1181 received++; 1182 budget--; 1183 } 1184 } 1185 } 1186 1187 if (unlikely(reset_rx_queue)) { 1188 unsigned long flags; 1189 u32 ctrl; 1190 1191 netdev_err(bp->dev, "RX queue corruption: reset it\n"); 1192 1193 spin_lock_irqsave(&bp->lock, flags); 1194 1195 ctrl = macb_readl(bp, NCR); 1196 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1197 1198 macb_init_rx_ring(bp); 1199 macb_writel(bp, RBQP, bp->rx_ring_dma); 1200 1201 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1202 1203 spin_unlock_irqrestore(&bp->lock, flags); 1204 return received; 1205 } 1206 1207 if (first_frag != -1) 1208 bp->rx_tail = first_frag; 1209 else 1210 bp->rx_tail = tail; 1211 1212 return received; 1213 } 1214 1215 static int macb_poll(struct napi_struct *napi, int budget) 1216 { 1217 struct macb 
*bp = container_of(napi, struct macb, napi); 1218 int work_done; 1219 u32 status; 1220 1221 status = macb_readl(bp, RSR); 1222 macb_writel(bp, RSR, status); 1223 1224 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", 1225 (unsigned long)status, budget); 1226 1227 work_done = bp->macbgem_ops.mog_rx(bp, budget); 1228 if (work_done < budget) { 1229 napi_complete_done(napi, work_done); 1230 1231 /* Packets received while interrupts were disabled */ 1232 status = macb_readl(bp, RSR); 1233 if (status) { 1234 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1235 macb_writel(bp, ISR, MACB_BIT(RCOMP)); 1236 napi_reschedule(napi); 1237 } else { 1238 macb_writel(bp, IER, MACB_RX_INT_FLAGS); 1239 } 1240 } 1241 1242 /* TODO: Handle errors */ 1243 1244 return work_done; 1245 } 1246 1247 static irqreturn_t macb_interrupt(int irq, void *dev_id) 1248 { 1249 struct macb_queue *queue = dev_id; 1250 struct macb *bp = queue->bp; 1251 struct net_device *dev = bp->dev; 1252 u32 status, ctrl; 1253 1254 status = queue_readl(queue, ISR); 1255 1256 if (unlikely(!status)) 1257 return IRQ_NONE; 1258 1259 spin_lock(&bp->lock); 1260 1261 while (status) { 1262 /* close possible race with dev_close */ 1263 if (unlikely(!netif_running(dev))) { 1264 queue_writel(queue, IDR, -1); 1265 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1266 queue_writel(queue, ISR, -1); 1267 break; 1268 } 1269 1270 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", 1271 (unsigned int)(queue - bp->queues), 1272 (unsigned long)status); 1273 1274 if (status & MACB_RX_INT_FLAGS) { 1275 /* There's no point taking any more interrupts 1276 * until we have processed the buffers. The 1277 * scheduling call may fail if the poll routine 1278 * is already scheduled, so disable interrupts 1279 * now. 1280 */ 1281 queue_writel(queue, IDR, MACB_RX_INT_FLAGS); 1282 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1283 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1284 1285 if (napi_schedule_prep(&bp->napi)) { 1286 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); 1287 __napi_schedule(&bp->napi); 1288 } 1289 } 1290 1291 if (unlikely(status & (MACB_TX_ERR_FLAGS))) { 1292 queue_writel(queue, IDR, MACB_TX_INT_FLAGS); 1293 schedule_work(&queue->tx_error_task); 1294 1295 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1296 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS); 1297 1298 break; 1299 } 1300 1301 if (status & MACB_BIT(TCOMP)) 1302 macb_tx_interrupt(queue); 1303 1304 /* Link change detection isn't possible with RMII, so we'll 1305 * add that if/when we get our hands on a full-blown MII PHY. 1306 */ 1307 1308 /* There is a hardware issue under heavy load where DMA can 1309 * stop, this causes endless "used buffer descriptor read" 1310 * interrupts but it can be cleared by re-enabling RX. See 1311 * the at91 manual, section 41.3.1 or the Zynq manual 1312 * section 16.7.4 for details. 
1313 */ 1314 if (status & MACB_BIT(RXUBR)) { 1315 ctrl = macb_readl(bp, NCR); 1316 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1317 wmb(); 1318 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1319 1320 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1321 queue_writel(queue, ISR, MACB_BIT(RXUBR)); 1322 } 1323 1324 if (status & MACB_BIT(ISR_ROVR)) { 1325 /* We missed at least one packet */ 1326 if (macb_is_gem(bp)) 1327 bp->hw_stats.gem.rx_overruns++; 1328 else 1329 bp->hw_stats.macb.rx_overruns++; 1330 1331 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1332 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); 1333 } 1334 1335 if (status & MACB_BIT(HRESP)) { 1336 /* TODO: Reset the hardware, and maybe move the 1337 * netdev_err to a lower-priority context as well 1338 * (work queue?) 1339 */ 1340 netdev_err(dev, "DMA bus error: HRESP not OK\n"); 1341 1342 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1343 queue_writel(queue, ISR, MACB_BIT(HRESP)); 1344 } 1345 status = queue_readl(queue, ISR); 1346 } 1347 1348 spin_unlock(&bp->lock); 1349 1350 return IRQ_HANDLED; 1351 } 1352 1353 #ifdef CONFIG_NET_POLL_CONTROLLER 1354 /* Polling receive - used by netconsole and other diagnostic tools 1355 * to allow network i/o with interrupts disabled. 1356 */ 1357 static void macb_poll_controller(struct net_device *dev) 1358 { 1359 struct macb *bp = netdev_priv(dev); 1360 struct macb_queue *queue; 1361 unsigned long flags; 1362 unsigned int q; 1363 1364 local_irq_save(flags); 1365 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 1366 macb_interrupt(dev->irq, queue); 1367 local_irq_restore(flags); 1368 } 1369 #endif 1370 1371 static unsigned int macb_tx_map(struct macb *bp, 1372 struct macb_queue *queue, 1373 struct sk_buff *skb, 1374 unsigned int hdrlen) 1375 { 1376 dma_addr_t mapping; 1377 unsigned int len, entry, i, tx_head = queue->tx_head; 1378 struct macb_tx_skb *tx_skb = NULL; 1379 struct macb_dma_desc *desc; 1380 unsigned int offset, size, count = 0; 1381 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; 1382 unsigned int eof = 1, mss_mfs = 0; 1383 u32 ctrl, lso_ctrl = 0, seq_ctrl = 0; 1384 1385 /* LSO */ 1386 if (skb_shinfo(skb)->gso_size != 0) { 1387 if (ip_hdr(skb)->protocol == IPPROTO_UDP) 1388 /* UDP - UFO */ 1389 lso_ctrl = MACB_LSO_UFO_ENABLE; 1390 else 1391 /* TCP - TSO */ 1392 lso_ctrl = MACB_LSO_TSO_ENABLE; 1393 } 1394 1395 /* First, map non-paged data */ 1396 len = skb_headlen(skb); 1397 1398 /* first buffer length */ 1399 size = hdrlen; 1400 1401 offset = 0; 1402 while (len) { 1403 entry = macb_tx_ring_wrap(bp, tx_head); 1404 tx_skb = &queue->tx_skb[entry]; 1405 1406 mapping = dma_map_single(&bp->pdev->dev, 1407 skb->data + offset, 1408 size, DMA_TO_DEVICE); 1409 if (dma_mapping_error(&bp->pdev->dev, mapping)) 1410 goto dma_error; 1411 1412 /* Save info to properly release resources */ 1413 tx_skb->skb = NULL; 1414 tx_skb->mapping = mapping; 1415 tx_skb->size = size; 1416 tx_skb->mapped_as_page = false; 1417 1418 len -= size; 1419 offset += size; 1420 count++; 1421 tx_head++; 1422 1423 size = min(len, bp->max_tx_length); 1424 } 1425 1426 /* Then, map paged data from fragments */ 1427 for (f = 0; f < nr_frags; f++) { 1428 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 1429 1430 len = skb_frag_size(frag); 1431 offset = 0; 1432 while (len) { 1433 size = min(len, bp->max_tx_length); 1434 entry = macb_tx_ring_wrap(bp, tx_head); 1435 tx_skb = &queue->tx_skb[entry]; 1436 1437 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 1438 offset, size, DMA_TO_DEVICE); 1439 if 
(dma_mapping_error(&bp->pdev->dev, mapping)) 1440 goto dma_error; 1441 1442 /* Save info to properly release resources */ 1443 tx_skb->skb = NULL; 1444 tx_skb->mapping = mapping; 1445 tx_skb->size = size; 1446 tx_skb->mapped_as_page = true; 1447 1448 len -= size; 1449 offset += size; 1450 count++; 1451 tx_head++; 1452 } 1453 } 1454 1455 /* Should never happen */ 1456 if (unlikely(!tx_skb)) { 1457 netdev_err(bp->dev, "BUG! empty skb!\n"); 1458 return 0; 1459 } 1460 1461 /* This is the last buffer of the frame: save socket buffer */ 1462 tx_skb->skb = skb; 1463 1464 /* Update TX ring: update buffer descriptors in reverse order 1465 * to avoid race condition 1466 */ 1467 1468 /* Set 'TX_USED' bit in buffer descriptor at tx_head position 1469 * to set the end of TX queue 1470 */ 1471 i = tx_head; 1472 entry = macb_tx_ring_wrap(bp, i); 1473 ctrl = MACB_BIT(TX_USED); 1474 desc = macb_tx_desc(queue, entry); 1475 desc->ctrl = ctrl; 1476 1477 if (lso_ctrl) { 1478 if (lso_ctrl == MACB_LSO_UFO_ENABLE) 1479 /* include header and FCS in value given to h/w */ 1480 mss_mfs = skb_shinfo(skb)->gso_size + 1481 skb_transport_offset(skb) + 1482 ETH_FCS_LEN; 1483 else /* TSO */ { 1484 mss_mfs = skb_shinfo(skb)->gso_size; 1485 /* TCP Sequence Number Source Select 1486 * can be set only for TSO 1487 */ 1488 seq_ctrl = 0; 1489 } 1490 } 1491 1492 do { 1493 i--; 1494 entry = macb_tx_ring_wrap(bp, i); 1495 tx_skb = &queue->tx_skb[entry]; 1496 desc = macb_tx_desc(queue, entry); 1497 1498 ctrl = (u32)tx_skb->size; 1499 if (eof) { 1500 ctrl |= MACB_BIT(TX_LAST); 1501 eof = 0; 1502 } 1503 if (unlikely(entry == (bp->tx_ring_size - 1))) 1504 ctrl |= MACB_BIT(TX_WRAP); 1505 1506 /* First descriptor is header descriptor */ 1507 if (i == queue->tx_head) { 1508 ctrl |= MACB_BF(TX_LSO, lso_ctrl); 1509 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl); 1510 } else 1511 /* Only set MSS/MFS on payload descriptors 1512 * (second or later descriptor) 1513 */ 1514 ctrl |= MACB_BF(MSS_MFS, mss_mfs); 1515 1516 /* Set TX buffer descriptor */ 1517 macb_set_addr(bp, desc, tx_skb->mapping); 1518 /* desc->addr must be visible to hardware before clearing 1519 * 'TX_USED' bit in desc->ctrl. 1520 */ 1521 wmb(); 1522 desc->ctrl = ctrl; 1523 } while (i != queue->tx_head); 1524 1525 queue->tx_head = tx_head; 1526 1527 return count; 1528 1529 dma_error: 1530 netdev_err(bp->dev, "TX DMA map failed\n"); 1531 1532 for (i = queue->tx_head; i != tx_head; i++) { 1533 tx_skb = macb_tx_skb(queue, i); 1534 1535 macb_tx_unmap(bp, tx_skb); 1536 } 1537 1538 return 0; 1539 } 1540 1541 static netdev_features_t macb_features_check(struct sk_buff *skb, 1542 struct net_device *dev, 1543 netdev_features_t features) 1544 { 1545 unsigned int nr_frags, f; 1546 unsigned int hdrlen; 1547 1548 /* Validate LSO compatibility */ 1549 1550 /* there is only one buffer */ 1551 if (!skb_is_nonlinear(skb)) 1552 return features; 1553 1554 /* length of header */ 1555 hdrlen = skb_transport_offset(skb); 1556 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 1557 hdrlen += tcp_hdrlen(skb); 1558 1559 /* For LSO: 1560 * When software supplies two or more payload buffers all payload buffers 1561 * apart from the last must be a multiple of 8 bytes in size. 
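 *
 * Worked example (illustrative numbers): for a TCP skb with a 14-byte
 * Ethernet header, a 20-byte IP header and a 20-byte TCP header, hdrlen is
 * 54. If skb_headlen() is 62, the head carries 8 bytes of payload, a
 * multiple of MACB_TX_LEN_ALIGN, so LSO may stay enabled; if it were 60,
 * the 6-byte remainder would make the check below clear NETIF_F_TSO for
 * this skb.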
1562 */ 1563 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN)) 1564 return features & ~MACB_NETIF_LSO; 1565 1566 nr_frags = skb_shinfo(skb)->nr_frags; 1567 /* No need to check last fragment */ 1568 nr_frags--; 1569 for (f = 0; f < nr_frags; f++) { 1570 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 1571 1572 if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN)) 1573 return features & ~MACB_NETIF_LSO; 1574 } 1575 return features; 1576 } 1577 1578 static inline int macb_clear_csum(struct sk_buff *skb) 1579 { 1580 /* no change for packets without checksum offloading */ 1581 if (skb->ip_summed != CHECKSUM_PARTIAL) 1582 return 0; 1583 1584 /* make sure we can modify the header */ 1585 if (unlikely(skb_cow_head(skb, 0))) 1586 return -1; 1587 1588 /* initialize checksum field 1589 * This is required - at least for Zynq, which otherwise calculates 1590 * wrong UDP header checksums for UDP packets with UDP data len <=2 1591 */ 1592 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; 1593 return 0; 1594 } 1595 1596 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) 1597 { 1598 u16 queue_index = skb_get_queue_mapping(skb); 1599 struct macb *bp = netdev_priv(dev); 1600 struct macb_queue *queue = &bp->queues[queue_index]; 1601 unsigned long flags; 1602 unsigned int desc_cnt, nr_frags, frag_size, f; 1603 unsigned int hdrlen; 1604 bool is_lso, is_udp = 0; 1605 1606 is_lso = (skb_shinfo(skb)->gso_size != 0); 1607 1608 if (is_lso) { 1609 is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP); 1610 1611 /* length of headers */ 1612 if (is_udp) 1613 /* only queue eth + ip headers separately for UDP */ 1614 hdrlen = skb_transport_offset(skb); 1615 else 1616 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); 1617 if (skb_headlen(skb) < hdrlen) { 1618 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); 1619 /* if this is required, would need to copy to single buffer */ 1620 return NETDEV_TX_BUSY; 1621 } 1622 } else 1623 hdrlen = min(skb_headlen(skb), bp->max_tx_length); 1624 1625 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 1626 netdev_vdbg(bp->dev, 1627 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", 1628 queue_index, skb->len, skb->head, skb->data, 1629 skb_tail_pointer(skb), skb_end_pointer(skb)); 1630 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, 1631 skb->data, 16, true); 1632 #endif 1633 1634 /* Count how many TX buffer descriptors are needed to send this 1635 * socket buffer: skb fragments of jumbo frames may need to be 1636 * split into many buffer descriptors. 1637 */ 1638 if (is_lso && (skb_headlen(skb) > hdrlen)) 1639 /* extra header descriptor if also payload in first buffer */ 1640 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; 1641 else 1642 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); 1643 nr_frags = skb_shinfo(skb)->nr_frags; 1644 for (f = 0; f < nr_frags; f++) { 1645 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); 1646 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); 1647 } 1648 1649 spin_lock_irqsave(&bp->lock, flags); 1650 1651 /* This is a hard error, log it. 
*/ 1652 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, 1653 bp->tx_ring_size) < desc_cnt) { 1654 netif_stop_subqueue(dev, queue_index); 1655 spin_unlock_irqrestore(&bp->lock, flags); 1656 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", 1657 queue->tx_head, queue->tx_tail); 1658 return NETDEV_TX_BUSY; 1659 } 1660 1661 if (macb_clear_csum(skb)) { 1662 dev_kfree_skb_any(skb); 1663 goto unlock; 1664 } 1665 1666 /* Map socket buffer for DMA transfer */ 1667 if (!macb_tx_map(bp, queue, skb, hdrlen)) { 1668 dev_kfree_skb_any(skb); 1669 goto unlock; 1670 } 1671 1672 /* Make newly initialized descriptor visible to hardware */ 1673 wmb(); 1674 skb_tx_timestamp(skb); 1675 1676 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 1677 1678 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) 1679 netif_stop_subqueue(dev, queue_index); 1680 1681 unlock: 1682 spin_unlock_irqrestore(&bp->lock, flags); 1683 1684 return NETDEV_TX_OK; 1685 } 1686 1687 static void macb_init_rx_buffer_size(struct macb *bp, size_t size) 1688 { 1689 if (!macb_is_gem(bp)) { 1690 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; 1691 } else { 1692 bp->rx_buffer_size = size; 1693 1694 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { 1695 netdev_dbg(bp->dev, 1696 "RX buffer must be multiple of %d bytes, expanding\n", 1697 RX_BUFFER_MULTIPLE); 1698 bp->rx_buffer_size = 1699 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); 1700 } 1701 } 1702 1703 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", 1704 bp->dev->mtu, bp->rx_buffer_size); 1705 } 1706 1707 static void gem_free_rx_buffers(struct macb *bp) 1708 { 1709 struct sk_buff *skb; 1710 struct macb_dma_desc *desc; 1711 dma_addr_t addr; 1712 int i; 1713 1714 if (!bp->rx_skbuff) 1715 return; 1716 1717 for (i = 0; i < bp->rx_ring_size; i++) { 1718 skb = bp->rx_skbuff[i]; 1719 1720 if (!skb) 1721 continue; 1722 1723 desc = macb_rx_desc(bp, i); 1724 addr = macb_get_addr(bp, desc); 1725 1726 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, 1727 DMA_FROM_DEVICE); 1728 dev_kfree_skb_any(skb); 1729 skb = NULL; 1730 } 1731 1732 kfree(bp->rx_skbuff); 1733 bp->rx_skbuff = NULL; 1734 } 1735 1736 static void macb_free_rx_buffers(struct macb *bp) 1737 { 1738 if (bp->rx_buffers) { 1739 dma_free_coherent(&bp->pdev->dev, 1740 bp->rx_ring_size * bp->rx_buffer_size, 1741 bp->rx_buffers, bp->rx_buffers_dma); 1742 bp->rx_buffers = NULL; 1743 } 1744 } 1745 1746 static void macb_free_consistent(struct macb *bp) 1747 { 1748 struct macb_queue *queue; 1749 unsigned int q; 1750 1751 bp->macbgem_ops.mog_free_rx_buffers(bp); 1752 if (bp->rx_ring) { 1753 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp), 1754 bp->rx_ring, bp->rx_ring_dma); 1755 bp->rx_ring = NULL; 1756 } 1757 1758 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1759 kfree(queue->tx_skb); 1760 queue->tx_skb = NULL; 1761 if (queue->tx_ring) { 1762 dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp), 1763 queue->tx_ring, queue->tx_ring_dma); 1764 queue->tx_ring = NULL; 1765 } 1766 } 1767 } 1768 1769 static int gem_alloc_rx_buffers(struct macb *bp) 1770 { 1771 int size; 1772 1773 size = bp->rx_ring_size * sizeof(struct sk_buff *); 1774 bp->rx_skbuff = kzalloc(size, GFP_KERNEL); 1775 if (!bp->rx_skbuff) 1776 return -ENOMEM; 1777 else 1778 netdev_dbg(bp->dev, 1779 "Allocated %d RX struct sk_buff entries at %p\n", 1780 bp->rx_ring_size, bp->rx_skbuff); 1781 return 0; 1782 } 1783 1784 static int macb_alloc_rx_buffers(struct macb *bp) 1785 { 1786 int size; 1787 1788 size = bp->rx_ring_size * 
bp->rx_buffer_size; 1789 bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, 1790 &bp->rx_buffers_dma, GFP_KERNEL); 1791 if (!bp->rx_buffers) 1792 return -ENOMEM; 1793 1794 netdev_dbg(bp->dev, 1795 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", 1796 size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); 1797 return 0; 1798 } 1799 1800 static int macb_alloc_consistent(struct macb *bp) 1801 { 1802 struct macb_queue *queue; 1803 unsigned int q; 1804 int size; 1805 1806 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1807 size = TX_RING_BYTES(bp); 1808 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 1809 &queue->tx_ring_dma, 1810 GFP_KERNEL); 1811 if (!queue->tx_ring) 1812 goto out_err; 1813 netdev_dbg(bp->dev, 1814 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n", 1815 q, size, (unsigned long)queue->tx_ring_dma, 1816 queue->tx_ring); 1817 1818 size = bp->tx_ring_size * sizeof(struct macb_tx_skb); 1819 queue->tx_skb = kmalloc(size, GFP_KERNEL); 1820 if (!queue->tx_skb) 1821 goto out_err; 1822 } 1823 1824 size = RX_RING_BYTES(bp); 1825 bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 1826 &bp->rx_ring_dma, GFP_KERNEL); 1827 if (!bp->rx_ring) 1828 goto out_err; 1829 netdev_dbg(bp->dev, 1830 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", 1831 size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); 1832 1833 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) 1834 goto out_err; 1835 1836 return 0; 1837 1838 out_err: 1839 macb_free_consistent(bp); 1840 return -ENOMEM; 1841 } 1842 1843 static void gem_init_rings(struct macb *bp) 1844 { 1845 struct macb_queue *queue; 1846 struct macb_dma_desc *desc = NULL; 1847 unsigned int q; 1848 int i; 1849 1850 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1851 for (i = 0; i < bp->tx_ring_size; i++) { 1852 desc = macb_tx_desc(queue, i); 1853 macb_set_addr(bp, desc, 0); 1854 desc->ctrl = MACB_BIT(TX_USED); 1855 } 1856 desc->ctrl |= MACB_BIT(TX_WRAP); 1857 queue->tx_head = 0; 1858 queue->tx_tail = 0; 1859 } 1860 1861 bp->rx_tail = 0; 1862 bp->rx_prepared_head = 0; 1863 1864 gem_rx_refill(bp); 1865 } 1866 1867 static void macb_init_rings(struct macb *bp) 1868 { 1869 int i; 1870 struct macb_dma_desc *desc = NULL; 1871 1872 macb_init_rx_ring(bp); 1873 1874 for (i = 0; i < bp->tx_ring_size; i++) { 1875 desc = macb_tx_desc(&bp->queues[0], i); 1876 macb_set_addr(bp, desc, 0); 1877 desc->ctrl = MACB_BIT(TX_USED); 1878 } 1879 bp->queues[0].tx_head = 0; 1880 bp->queues[0].tx_tail = 0; 1881 desc->ctrl |= MACB_BIT(TX_WRAP); 1882 } 1883 1884 static void macb_reset_hw(struct macb *bp) 1885 { 1886 struct macb_queue *queue; 1887 unsigned int q; 1888 1889 /* Disable RX and TX (XXX: Should we halt the transmission 1890 * more gracefully?) 1891 */ 1892 macb_writel(bp, NCR, 0); 1893 1894 /* Clear the stats registers (XXX: Update stats first?) 
*/ 1895 macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); 1896 1897 /* Clear all status flags */ 1898 macb_writel(bp, TSR, -1); 1899 macb_writel(bp, RSR, -1); 1900 1901 /* Disable all interrupts */ 1902 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1903 queue_writel(queue, IDR, -1); 1904 queue_readl(queue, ISR); 1905 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1906 queue_writel(queue, ISR, -1); 1907 } 1908 } 1909 1910 static u32 gem_mdc_clk_div(struct macb *bp) 1911 { 1912 u32 config; 1913 unsigned long pclk_hz = clk_get_rate(bp->pclk); 1914 1915 if (pclk_hz <= 20000000) 1916 config = GEM_BF(CLK, GEM_CLK_DIV8); 1917 else if (pclk_hz <= 40000000) 1918 config = GEM_BF(CLK, GEM_CLK_DIV16); 1919 else if (pclk_hz <= 80000000) 1920 config = GEM_BF(CLK, GEM_CLK_DIV32); 1921 else if (pclk_hz <= 120000000) 1922 config = GEM_BF(CLK, GEM_CLK_DIV48); 1923 else if (pclk_hz <= 160000000) 1924 config = GEM_BF(CLK, GEM_CLK_DIV64); 1925 else 1926 config = GEM_BF(CLK, GEM_CLK_DIV96); 1927 1928 return config; 1929 } 1930 1931 static u32 macb_mdc_clk_div(struct macb *bp) 1932 { 1933 u32 config; 1934 unsigned long pclk_hz; 1935 1936 if (macb_is_gem(bp)) 1937 return gem_mdc_clk_div(bp); 1938 1939 pclk_hz = clk_get_rate(bp->pclk); 1940 if (pclk_hz <= 20000000) 1941 config = MACB_BF(CLK, MACB_CLK_DIV8); 1942 else if (pclk_hz <= 40000000) 1943 config = MACB_BF(CLK, MACB_CLK_DIV16); 1944 else if (pclk_hz <= 80000000) 1945 config = MACB_BF(CLK, MACB_CLK_DIV32); 1946 else 1947 config = MACB_BF(CLK, MACB_CLK_DIV64); 1948 1949 return config; 1950 } 1951 1952 /* Get the DMA bus width field of the network configuration register that we 1953 * should program. We find the width from decoding the design configuration 1954 * register to find the maximum supported data bus width. 1955 */ 1956 static u32 macb_dbw(struct macb *bp) 1957 { 1958 if (!macb_is_gem(bp)) 1959 return 0; 1960 1961 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { 1962 case 4: 1963 return GEM_BF(DBW, GEM_DBW128); 1964 case 2: 1965 return GEM_BF(DBW, GEM_DBW64); 1966 case 1: 1967 default: 1968 return GEM_BF(DBW, GEM_DBW32); 1969 } 1970 } 1971 1972 /* Configure the receive DMA engine 1973 * - use the correct receive buffer size 1974 * - set best burst length for DMA operations 1975 * (if not supported by FIFO, it will fallback to default) 1976 * - set both rx/tx packet buffers to full memory size 1977 * These are configurable parameters for GEM. 
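 *
 * Example of the buffer-size encoding (driver defaults, assuming a
 * NET_IP_ALIGN of 2, not a hardware requirement): with a 1500-byte MTU the
 * GEM RX buffer works out to 1536 bytes (1500 + 14 + 4 + 2 rounded up by
 * macb_init_rx_buffer_size()), programmed as RXBS = 1536 / 64 = 24, i.e.
 * the field counts RX_BUFFER_MULTIPLE units.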
1978 */ 1979 static void macb_configure_dma(struct macb *bp) 1980 { 1981 u32 dmacfg; 1982 1983 if (macb_is_gem(bp)) { 1984 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); 1985 dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE); 1986 if (bp->dma_burst_length) 1987 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); 1988 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 1989 dmacfg &= ~GEM_BIT(ENDIA_PKT); 1990 1991 if (bp->native_io) 1992 dmacfg &= ~GEM_BIT(ENDIA_DESC); 1993 else 1994 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ 1995 1996 if (bp->dev->features & NETIF_F_HW_CSUM) 1997 dmacfg |= GEM_BIT(TXCOEN); 1998 else 1999 dmacfg &= ~GEM_BIT(TXCOEN); 2000 2001 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2002 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2003 dmacfg |= GEM_BIT(ADDR64); 2004 #endif 2005 #ifdef CONFIG_MACB_USE_HWSTAMP 2006 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) 2007 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT); 2008 #endif 2009 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", 2010 dmacfg); 2011 gem_writel(bp, DMACFG, dmacfg); 2012 } 2013 } 2014 2015 static void macb_init_hw(struct macb *bp) 2016 { 2017 struct macb_queue *queue; 2018 unsigned int q; 2019 2020 u32 config; 2021 2022 macb_reset_hw(bp); 2023 macb_set_hwaddr(bp); 2024 2025 config = macb_mdc_clk_div(bp); 2026 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) 2027 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 2028 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ 2029 config |= MACB_BIT(PAE); /* PAuse Enable */ 2030 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ 2031 if (bp->caps & MACB_CAPS_JUMBO) 2032 config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ 2033 else 2034 config |= MACB_BIT(BIG); /* Receive oversized frames */ 2035 if (bp->dev->flags & IFF_PROMISC) 2036 config |= MACB_BIT(CAF); /* Copy All Frames */ 2037 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) 2038 config |= GEM_BIT(RXCOEN); 2039 if (!(bp->dev->flags & IFF_BROADCAST)) 2040 config |= MACB_BIT(NBC); /* No BroadCast */ 2041 config |= macb_dbw(bp); 2042 macb_writel(bp, NCFGR, config); 2043 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) 2044 gem_writel(bp, JML, bp->jumbo_max_len); 2045 bp->speed = SPEED_10; 2046 bp->duplex = DUPLEX_HALF; 2047 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; 2048 if (bp->caps & MACB_CAPS_JUMBO) 2049 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; 2050 2051 macb_configure_dma(bp); 2052 2053 /* Initialize TX and RX buffers */ 2054 macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma)); 2055 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2056 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2057 macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma)); 2058 #endif 2059 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2060 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); 2061 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2062 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2063 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); 2064 #endif 2065 2066 /* Enable interrupts */ 2067 queue_writel(queue, IER, 2068 MACB_RX_INT_FLAGS | 2069 MACB_TX_INT_FLAGS | 2070 MACB_BIT(HRESP)); 2071 } 2072 2073 /* Enable TX and RX */ 2074 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); 2075 } 2076 2077 /* The hash address register is 64 bits long and takes up two 2078 * locations in the memory map. The least significant bits are stored 2079 * in EMAC_HSL and the most significant bits in EMAC_HSH. 
2080 * 2081 * The unicast hash enable and the multicast hash enable bits in the 2082 * network configuration register enable the reception of hash matched 2083 * frames. The destination address is reduced to a 6 bit index into 2084 * the 64 bit hash register using the following hash function. The 2085 * hash function is an exclusive or of every sixth bit of the 2086 * destination address. 2087 * 2088 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] 2089 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] 2090 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] 2091 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] 2092 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] 2093 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] 2094 * 2095 * da[0] represents the least significant bit of the first byte 2096 * received, that is, the multicast/unicast indicator, and da[47] 2097 * represents the most significant bit of the last byte received. If 2098 * the hash index, hi[n], points to a bit that is set in the hash 2099 * register then the frame will be matched according to whether the 2100 * frame is multicast or unicast. A multicast match will be signalled 2101 * if the multicast hash enable bit is set, da[0] is 1 and the hash 2102 * index points to a bit set in the hash register. A unicast match 2103 * will be signalled if the unicast hash enable bit is set, da[0] is 0 2104 * and the hash index points to a bit set in the hash register. To 2105 * receive all multicast frames, the hash register should be set with 2106 * all ones and the multicast hash enable bit should be set in the 2107 * network configuration register. 2108 */ 2109 2110 static inline int hash_bit_value(int bitnr, __u8 *addr) 2111 { 2112 if (addr[bitnr / 8] & (1 << (bitnr % 8))) 2113 return 1; 2114 return 0; 2115 } 2116 2117 /* Return the hash index value for the specified address. */ 2118 static int hash_get_index(__u8 *addr) 2119 { 2120 int i, j, bitval; 2121 int hash_index = 0; 2122 2123 for (j = 0; j < 6; j++) { 2124 for (i = 0, bitval = 0; i < 8; i++) 2125 bitval ^= hash_bit_value(i * 6 + j, addr); 2126 2127 hash_index |= (bitval << j); 2128 } 2129 2130 return hash_index; 2131 } 2132 2133 /* Add multicast addresses to the internal multicast-hash table. */ 2134 static void macb_sethashtable(struct net_device *dev) 2135 { 2136 struct netdev_hw_addr *ha; 2137 unsigned long mc_filter[2]; 2138 unsigned int bitnr; 2139 struct macb *bp = netdev_priv(dev); 2140 2141 mc_filter[0] = 0; 2142 mc_filter[1] = 0; 2143 2144 netdev_for_each_mc_addr(ha, dev) { 2145 bitnr = hash_get_index(ha->addr); 2146 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 2147 } 2148 2149 macb_or_gem_writel(bp, HRB, mc_filter[0]); 2150 macb_or_gem_writel(bp, HRT, mc_filter[1]); 2151 } 2152 2153 /* Enable/Disable promiscuous and multicast modes. 
*/ 2154 static void macb_set_rx_mode(struct net_device *dev) 2155 { 2156 unsigned long cfg; 2157 struct macb *bp = netdev_priv(dev); 2158 2159 cfg = macb_readl(bp, NCFGR); 2160 2161 if (dev->flags & IFF_PROMISC) { 2162 /* Enable promiscuous mode */ 2163 cfg |= MACB_BIT(CAF); 2164 2165 /* Disable RX checksum offload */ 2166 if (macb_is_gem(bp)) 2167 cfg &= ~GEM_BIT(RXCOEN); 2168 } else { 2169 /* Disable promiscuous mode */ 2170 cfg &= ~MACB_BIT(CAF); 2171 2172 /* Enable RX checksum offload only if requested */ 2173 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) 2174 cfg |= GEM_BIT(RXCOEN); 2175 } 2176 2177 if (dev->flags & IFF_ALLMULTI) { 2178 /* Enable all multicast mode */ 2179 macb_or_gem_writel(bp, HRB, -1); 2180 macb_or_gem_writel(bp, HRT, -1); 2181 cfg |= MACB_BIT(NCFGR_MTI); 2182 } else if (!netdev_mc_empty(dev)) { 2183 /* Enable specific multicasts */ 2184 macb_sethashtable(dev); 2185 cfg |= MACB_BIT(NCFGR_MTI); 2186 } else if (dev->flags & (~IFF_ALLMULTI)) { 2187 /* Disable all multicast mode */ 2188 macb_or_gem_writel(bp, HRB, 0); 2189 macb_or_gem_writel(bp, HRT, 0); 2190 cfg &= ~MACB_BIT(NCFGR_MTI); 2191 } 2192 2193 macb_writel(bp, NCFGR, cfg); 2194 } 2195 2196 static int macb_open(struct net_device *dev) 2197 { 2198 struct macb *bp = netdev_priv(dev); 2199 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; 2200 int err; 2201 2202 netdev_dbg(bp->dev, "open\n"); 2203 2204 /* carrier starts down */ 2205 netif_carrier_off(dev); 2206 2207 /* if the phy is not yet register, retry later*/ 2208 if (!dev->phydev) 2209 return -EAGAIN; 2210 2211 /* RX buffers initialization */ 2212 macb_init_rx_buffer_size(bp, bufsz); 2213 2214 err = macb_alloc_consistent(bp); 2215 if (err) { 2216 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", 2217 err); 2218 return err; 2219 } 2220 2221 napi_enable(&bp->napi); 2222 2223 bp->macbgem_ops.mog_init_rings(bp); 2224 macb_init_hw(bp); 2225 2226 /* schedule a link state check */ 2227 phy_start(dev->phydev); 2228 2229 netif_tx_start_all_queues(dev); 2230 2231 if (bp->ptp_info) 2232 bp->ptp_info->ptp_init(dev); 2233 2234 return 0; 2235 } 2236 2237 static int macb_close(struct net_device *dev) 2238 { 2239 struct macb *bp = netdev_priv(dev); 2240 unsigned long flags; 2241 2242 netif_tx_stop_all_queues(dev); 2243 napi_disable(&bp->napi); 2244 2245 if (dev->phydev) 2246 phy_stop(dev->phydev); 2247 2248 spin_lock_irqsave(&bp->lock, flags); 2249 macb_reset_hw(bp); 2250 netif_carrier_off(dev); 2251 spin_unlock_irqrestore(&bp->lock, flags); 2252 2253 macb_free_consistent(bp); 2254 2255 if (bp->ptp_info) 2256 bp->ptp_info->ptp_remove(dev); 2257 2258 return 0; 2259 } 2260 2261 static int macb_change_mtu(struct net_device *dev, int new_mtu) 2262 { 2263 if (netif_running(dev)) 2264 return -EBUSY; 2265 2266 dev->mtu = new_mtu; 2267 2268 return 0; 2269 } 2270 2271 static void gem_update_stats(struct macb *bp) 2272 { 2273 unsigned int i; 2274 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 2275 2276 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { 2277 u32 offset = gem_statistics[i].offset; 2278 u64 val = bp->macb_reg_readl(bp, offset); 2279 2280 bp->ethtool_stats[i] += val; 2281 *p += val; 2282 2283 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { 2284 /* Add GEM_OCTTXH, GEM_OCTRXH */ 2285 val = bp->macb_reg_readl(bp, offset + 4); 2286 bp->ethtool_stats[i] += ((u64)val) << 32; 2287 *(++p) += val; 2288 } 2289 } 2290 } 2291 2292 static struct net_device_stats *gem_get_stats(struct macb *bp) 2293 { 2294 struct gem_stats *hwstat = &bp->hw_stats.gem; 2295 
struct net_device_stats *nstat = &bp->dev->stats; 2296 2297 gem_update_stats(bp); 2298 2299 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + 2300 hwstat->rx_alignment_errors + 2301 hwstat->rx_resource_errors + 2302 hwstat->rx_overruns + 2303 hwstat->rx_oversize_frames + 2304 hwstat->rx_jabbers + 2305 hwstat->rx_undersized_frames + 2306 hwstat->rx_length_field_frame_errors); 2307 nstat->tx_errors = (hwstat->tx_late_collisions + 2308 hwstat->tx_excessive_collisions + 2309 hwstat->tx_underrun + 2310 hwstat->tx_carrier_sense_errors); 2311 nstat->multicast = hwstat->rx_multicast_frames; 2312 nstat->collisions = (hwstat->tx_single_collision_frames + 2313 hwstat->tx_multiple_collision_frames + 2314 hwstat->tx_excessive_collisions); 2315 nstat->rx_length_errors = (hwstat->rx_oversize_frames + 2316 hwstat->rx_jabbers + 2317 hwstat->rx_undersized_frames + 2318 hwstat->rx_length_field_frame_errors); 2319 nstat->rx_over_errors = hwstat->rx_resource_errors; 2320 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; 2321 nstat->rx_frame_errors = hwstat->rx_alignment_errors; 2322 nstat->rx_fifo_errors = hwstat->rx_overruns; 2323 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; 2324 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; 2325 nstat->tx_fifo_errors = hwstat->tx_underrun; 2326 2327 return nstat; 2328 } 2329 2330 static void gem_get_ethtool_stats(struct net_device *dev, 2331 struct ethtool_stats *stats, u64 *data) 2332 { 2333 struct macb *bp; 2334 2335 bp = netdev_priv(dev); 2336 gem_update_stats(bp); 2337 memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN); 2338 } 2339 2340 static int gem_get_sset_count(struct net_device *dev, int sset) 2341 { 2342 switch (sset) { 2343 case ETH_SS_STATS: 2344 return GEM_STATS_LEN; 2345 default: 2346 return -EOPNOTSUPP; 2347 } 2348 } 2349 2350 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) 2351 { 2352 unsigned int i; 2353 2354 switch (sset) { 2355 case ETH_SS_STATS: 2356 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN) 2357 memcpy(p, gem_statistics[i].stat_string, 2358 ETH_GSTRING_LEN); 2359 break; 2360 } 2361 } 2362 2363 static struct net_device_stats *macb_get_stats(struct net_device *dev) 2364 { 2365 struct macb *bp = netdev_priv(dev); 2366 struct net_device_stats *nstat = &bp->dev->stats; 2367 struct macb_stats *hwstat = &bp->hw_stats.macb; 2368 2369 if (macb_is_gem(bp)) 2370 return gem_get_stats(bp); 2371 2372 /* read stats from hardware */ 2373 macb_update_stats(bp); 2374 2375 /* Convert HW stats into netdevice stats */ 2376 nstat->rx_errors = (hwstat->rx_fcs_errors + 2377 hwstat->rx_align_errors + 2378 hwstat->rx_resource_errors + 2379 hwstat->rx_overruns + 2380 hwstat->rx_oversize_pkts + 2381 hwstat->rx_jabbers + 2382 hwstat->rx_undersize_pkts + 2383 hwstat->rx_length_mismatch); 2384 nstat->tx_errors = (hwstat->tx_late_cols + 2385 hwstat->tx_excessive_cols + 2386 hwstat->tx_underruns + 2387 hwstat->tx_carrier_errors + 2388 hwstat->sqe_test_errors); 2389 nstat->collisions = (hwstat->tx_single_cols + 2390 hwstat->tx_multiple_cols + 2391 hwstat->tx_excessive_cols); 2392 nstat->rx_length_errors = (hwstat->rx_oversize_pkts + 2393 hwstat->rx_jabbers + 2394 hwstat->rx_undersize_pkts + 2395 hwstat->rx_length_mismatch); 2396 nstat->rx_over_errors = hwstat->rx_resource_errors + 2397 hwstat->rx_overruns; 2398 nstat->rx_crc_errors = hwstat->rx_fcs_errors; 2399 nstat->rx_frame_errors = hwstat->rx_align_errors; 2400 nstat->rx_fifo_errors = hwstat->rx_overruns; 2401 /* XXX: What 
does "missed" mean? */ 2402 nstat->tx_aborted_errors = hwstat->tx_excessive_cols; 2403 nstat->tx_carrier_errors = hwstat->tx_carrier_errors; 2404 nstat->tx_fifo_errors = hwstat->tx_underruns; 2405 /* Don't know about heartbeat or window errors... */ 2406 2407 return nstat; 2408 } 2409 2410 static int macb_get_regs_len(struct net_device *netdev) 2411 { 2412 return MACB_GREGS_NBR * sizeof(u32); 2413 } 2414 2415 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, 2416 void *p) 2417 { 2418 struct macb *bp = netdev_priv(dev); 2419 unsigned int tail, head; 2420 u32 *regs_buff = p; 2421 2422 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) 2423 | MACB_GREGS_VERSION; 2424 2425 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); 2426 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); 2427 2428 regs_buff[0] = macb_readl(bp, NCR); 2429 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); 2430 regs_buff[2] = macb_readl(bp, NSR); 2431 regs_buff[3] = macb_readl(bp, TSR); 2432 regs_buff[4] = macb_readl(bp, RBQP); 2433 regs_buff[5] = macb_readl(bp, TBQP); 2434 regs_buff[6] = macb_readl(bp, RSR); 2435 regs_buff[7] = macb_readl(bp, IMR); 2436 2437 regs_buff[8] = tail; 2438 regs_buff[9] = head; 2439 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); 2440 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); 2441 2442 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) 2443 regs_buff[12] = macb_or_gem_readl(bp, USRIO); 2444 if (macb_is_gem(bp)) 2445 regs_buff[13] = gem_readl(bp, DMACFG); 2446 } 2447 2448 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2449 { 2450 struct macb *bp = netdev_priv(netdev); 2451 2452 wol->supported = 0; 2453 wol->wolopts = 0; 2454 2455 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { 2456 wol->supported = WAKE_MAGIC; 2457 2458 if (bp->wol & MACB_WOL_ENABLED) 2459 wol->wolopts |= WAKE_MAGIC; 2460 } 2461 } 2462 2463 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2464 { 2465 struct macb *bp = netdev_priv(netdev); 2466 2467 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || 2468 (wol->wolopts & ~WAKE_MAGIC)) 2469 return -EOPNOTSUPP; 2470 2471 if (wol->wolopts & WAKE_MAGIC) 2472 bp->wol |= MACB_WOL_ENABLED; 2473 else 2474 bp->wol &= ~MACB_WOL_ENABLED; 2475 2476 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); 2477 2478 return 0; 2479 } 2480 2481 static void macb_get_ringparam(struct net_device *netdev, 2482 struct ethtool_ringparam *ring) 2483 { 2484 struct macb *bp = netdev_priv(netdev); 2485 2486 ring->rx_max_pending = MAX_RX_RING_SIZE; 2487 ring->tx_max_pending = MAX_TX_RING_SIZE; 2488 2489 ring->rx_pending = bp->rx_ring_size; 2490 ring->tx_pending = bp->tx_ring_size; 2491 } 2492 2493 static int macb_set_ringparam(struct net_device *netdev, 2494 struct ethtool_ringparam *ring) 2495 { 2496 struct macb *bp = netdev_priv(netdev); 2497 u32 new_rx_size, new_tx_size; 2498 unsigned int reset = 0; 2499 2500 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 2501 return -EINVAL; 2502 2503 new_rx_size = clamp_t(u32, ring->rx_pending, 2504 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE); 2505 new_rx_size = roundup_pow_of_two(new_rx_size); 2506 2507 new_tx_size = clamp_t(u32, ring->tx_pending, 2508 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE); 2509 new_tx_size = roundup_pow_of_two(new_tx_size); 2510 2511 if ((new_tx_size == bp->tx_ring_size) && 2512 (new_rx_size == bp->rx_ring_size)) { 2513 /* nothing to do */ 2514 return 0; 2515 } 2516 2517 if (netif_running(bp->dev)) { 2518 reset = 1; 2519 
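/* Bounce the interface so the rings are rebuilt with the new sizes:
 * macb_close() frees the current descriptor rings and buffers, and
 * macb_open() below re-allocates them using the updated
 * rx_ring_size/tx_ring_size values.
 */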
macb_close(bp->dev); 2520 } 2521 2522 bp->rx_ring_size = new_rx_size; 2523 bp->tx_ring_size = new_tx_size; 2524 2525 if (reset) 2526 macb_open(bp->dev); 2527 2528 return 0; 2529 } 2530 2531 #ifdef CONFIG_MACB_USE_HWSTAMP 2532 static unsigned int gem_get_tsu_rate(struct macb *bp) 2533 { 2534 struct clk *tsu_clk; 2535 unsigned int tsu_rate; 2536 2537 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); 2538 if (!IS_ERR(tsu_clk)) 2539 tsu_rate = clk_get_rate(tsu_clk); 2540 /* try pclk instead */ 2541 else if (!IS_ERR(bp->pclk)) { 2542 tsu_clk = bp->pclk; 2543 tsu_rate = clk_get_rate(tsu_clk); 2544 } else 2545 return -ENOTSUPP; 2546 return tsu_rate; 2547 } 2548 2549 static s32 gem_get_ptp_max_adj(void) 2550 { 2551 return 64000000; 2552 } 2553 2554 static int gem_get_ts_info(struct net_device *dev, 2555 struct ethtool_ts_info *info) 2556 { 2557 struct macb *bp = netdev_priv(dev); 2558 2559 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { 2560 ethtool_op_get_ts_info(dev, info); 2561 return 0; 2562 } 2563 2564 info->so_timestamping = 2565 SOF_TIMESTAMPING_TX_SOFTWARE | 2566 SOF_TIMESTAMPING_RX_SOFTWARE | 2567 SOF_TIMESTAMPING_SOFTWARE | 2568 SOF_TIMESTAMPING_TX_HARDWARE | 2569 SOF_TIMESTAMPING_RX_HARDWARE | 2570 SOF_TIMESTAMPING_RAW_HARDWARE; 2571 info->tx_types = 2572 (1 << HWTSTAMP_TX_ONESTEP_SYNC) | 2573 (1 << HWTSTAMP_TX_OFF) | 2574 (1 << HWTSTAMP_TX_ON); 2575 info->rx_filters = 2576 (1 << HWTSTAMP_FILTER_NONE) | 2577 (1 << HWTSTAMP_FILTER_ALL); 2578 2579 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1; 2580 2581 return 0; 2582 } 2583 2584 static struct macb_ptp_info gem_ptp_info = { 2585 .ptp_init = gem_ptp_init, 2586 .ptp_remove = gem_ptp_remove, 2587 .get_ptp_max_adj = gem_get_ptp_max_adj, 2588 .get_tsu_rate = gem_get_tsu_rate, 2589 .get_ts_info = gem_get_ts_info, 2590 .get_hwtst = gem_get_hwtst, 2591 .set_hwtst = gem_set_hwtst, 2592 }; 2593 #endif 2594 2595 static int macb_get_ts_info(struct net_device *netdev, 2596 struct ethtool_ts_info *info) 2597 { 2598 struct macb *bp = netdev_priv(netdev); 2599 2600 if (bp->ptp_info) 2601 return bp->ptp_info->get_ts_info(netdev, info); 2602 2603 return ethtool_op_get_ts_info(netdev, info); 2604 } 2605 2606 static const struct ethtool_ops macb_ethtool_ops = { 2607 .get_regs_len = macb_get_regs_len, 2608 .get_regs = macb_get_regs, 2609 .get_link = ethtool_op_get_link, 2610 .get_ts_info = ethtool_op_get_ts_info, 2611 .get_wol = macb_get_wol, 2612 .set_wol = macb_set_wol, 2613 .get_link_ksettings = phy_ethtool_get_link_ksettings, 2614 .set_link_ksettings = phy_ethtool_set_link_ksettings, 2615 .get_ringparam = macb_get_ringparam, 2616 .set_ringparam = macb_set_ringparam, 2617 }; 2618 2619 static const struct ethtool_ops gem_ethtool_ops = { 2620 .get_regs_len = macb_get_regs_len, 2621 .get_regs = macb_get_regs, 2622 .get_link = ethtool_op_get_link, 2623 .get_ts_info = macb_get_ts_info, 2624 .get_ethtool_stats = gem_get_ethtool_stats, 2625 .get_strings = gem_get_ethtool_strings, 2626 .get_sset_count = gem_get_sset_count, 2627 .get_link_ksettings = phy_ethtool_get_link_ksettings, 2628 .set_link_ksettings = phy_ethtool_set_link_ksettings, 2629 .get_ringparam = macb_get_ringparam, 2630 .set_ringparam = macb_set_ringparam, 2631 }; 2632 2633 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2634 { 2635 struct phy_device *phydev = dev->phydev; 2636 struct macb *bp = netdev_priv(dev); 2637 2638 if (!netif_running(dev)) 2639 return -EINVAL; 2640 2641 if (!phydev) 2642 return -ENODEV; 2643 2644 if (!bp->ptp_info) 2645 return 
phy_mii_ioctl(phydev, rq, cmd); 2646 2647 switch (cmd) { 2648 case SIOCSHWTSTAMP: 2649 return bp->ptp_info->set_hwtst(dev, rq, cmd); 2650 case SIOCGHWTSTAMP: 2651 return bp->ptp_info->get_hwtst(dev, rq); 2652 default: 2653 return phy_mii_ioctl(phydev, rq, cmd); 2654 } 2655 } 2656 2657 static int macb_set_features(struct net_device *netdev, 2658 netdev_features_t features) 2659 { 2660 struct macb *bp = netdev_priv(netdev); 2661 netdev_features_t changed = features ^ netdev->features; 2662 2663 /* TX checksum offload */ 2664 if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) { 2665 u32 dmacfg; 2666 2667 dmacfg = gem_readl(bp, DMACFG); 2668 if (features & NETIF_F_HW_CSUM) 2669 dmacfg |= GEM_BIT(TXCOEN); 2670 else 2671 dmacfg &= ~GEM_BIT(TXCOEN); 2672 gem_writel(bp, DMACFG, dmacfg); 2673 } 2674 2675 /* RX checksum offload */ 2676 if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) { 2677 u32 netcfg; 2678 2679 netcfg = gem_readl(bp, NCFGR); 2680 if (features & NETIF_F_RXCSUM && 2681 !(netdev->flags & IFF_PROMISC)) 2682 netcfg |= GEM_BIT(RXCOEN); 2683 else 2684 netcfg &= ~GEM_BIT(RXCOEN); 2685 gem_writel(bp, NCFGR, netcfg); 2686 } 2687 2688 return 0; 2689 } 2690 2691 static const struct net_device_ops macb_netdev_ops = { 2692 .ndo_open = macb_open, 2693 .ndo_stop = macb_close, 2694 .ndo_start_xmit = macb_start_xmit, 2695 .ndo_set_rx_mode = macb_set_rx_mode, 2696 .ndo_get_stats = macb_get_stats, 2697 .ndo_do_ioctl = macb_ioctl, 2698 .ndo_validate_addr = eth_validate_addr, 2699 .ndo_change_mtu = macb_change_mtu, 2700 .ndo_set_mac_address = eth_mac_addr, 2701 #ifdef CONFIG_NET_POLL_CONTROLLER 2702 .ndo_poll_controller = macb_poll_controller, 2703 #endif 2704 .ndo_set_features = macb_set_features, 2705 .ndo_features_check = macb_features_check, 2706 }; 2707 2708 /* Configure peripheral capabilities according to device tree 2709 * and integration options used 2710 */ 2711 static void macb_configure_caps(struct macb *bp, 2712 const struct macb_config *dt_conf) 2713 { 2714 u32 dcfg; 2715 2716 if (dt_conf) 2717 bp->caps = dt_conf->caps; 2718 2719 if (hw_is_gem(bp->regs, bp->native_io)) { 2720 bp->caps |= MACB_CAPS_MACB_IS_GEM; 2721 2722 dcfg = gem_readl(bp, DCFG1); 2723 if (GEM_BFEXT(IRQCOR, dcfg) == 0) 2724 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; 2725 dcfg = gem_readl(bp, DCFG2); 2726 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0) 2727 bp->caps |= MACB_CAPS_FIFO_MODE; 2728 #ifdef CONFIG_MACB_USE_HWSTAMP 2729 if (gem_has_ptp(bp)) { 2730 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) 2731 pr_err("GEM doesn't support hardware ptp.\n"); 2732 else { 2733 bp->hw_dma_cap |= HW_DMA_CAP_PTP; 2734 bp->ptp_info = &gem_ptp_info; 2735 } 2736 } 2737 #endif 2738 } 2739 2740 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); 2741 } 2742 2743 static void macb_probe_queues(void __iomem *mem, 2744 bool native_io, 2745 unsigned int *queue_mask, 2746 unsigned int *num_queues) 2747 { 2748 unsigned int hw_q; 2749 2750 *queue_mask = 0x1; 2751 *num_queues = 1; 2752 2753 /* is it macb or gem ? 
2754 * 2755 * We need to read directly from the hardware here because 2756 * we are early in the probe process and don't have the 2757 * MACB_CAPS_MACB_IS_GEM flag positioned 2758 */ 2759 if (!hw_is_gem(mem, native_io)) 2760 return; 2761 2762 /* bit 0 is never set but queue 0 always exists */ 2763 *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff; 2764 2765 *queue_mask |= 0x1; 2766 2767 for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) 2768 if (*queue_mask & (1 << hw_q)) 2769 (*num_queues)++; 2770 } 2771 2772 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, 2773 struct clk **hclk, struct clk **tx_clk, 2774 struct clk **rx_clk) 2775 { 2776 struct macb_platform_data *pdata; 2777 int err; 2778 2779 pdata = dev_get_platdata(&pdev->dev); 2780 if (pdata) { 2781 *pclk = pdata->pclk; 2782 *hclk = pdata->hclk; 2783 } else { 2784 *pclk = devm_clk_get(&pdev->dev, "pclk"); 2785 *hclk = devm_clk_get(&pdev->dev, "hclk"); 2786 } 2787 2788 if (IS_ERR(*pclk)) { 2789 err = PTR_ERR(*pclk); 2790 dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); 2791 return err; 2792 } 2793 2794 if (IS_ERR(*hclk)) { 2795 err = PTR_ERR(*hclk); 2796 dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); 2797 return err; 2798 } 2799 2800 *tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); 2801 if (IS_ERR(*tx_clk)) 2802 *tx_clk = NULL; 2803 2804 *rx_clk = devm_clk_get(&pdev->dev, "rx_clk"); 2805 if (IS_ERR(*rx_clk)) 2806 *rx_clk = NULL; 2807 2808 err = clk_prepare_enable(*pclk); 2809 if (err) { 2810 dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); 2811 return err; 2812 } 2813 2814 err = clk_prepare_enable(*hclk); 2815 if (err) { 2816 dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err); 2817 goto err_disable_pclk; 2818 } 2819 2820 err = clk_prepare_enable(*tx_clk); 2821 if (err) { 2822 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); 2823 goto err_disable_hclk; 2824 } 2825 2826 err = clk_prepare_enable(*rx_clk); 2827 if (err) { 2828 dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); 2829 goto err_disable_txclk; 2830 } 2831 2832 return 0; 2833 2834 err_disable_txclk: 2835 clk_disable_unprepare(*tx_clk); 2836 2837 err_disable_hclk: 2838 clk_disable_unprepare(*hclk); 2839 2840 err_disable_pclk: 2841 clk_disable_unprepare(*pclk); 2842 2843 return err; 2844 } 2845 2846 static int macb_init(struct platform_device *pdev) 2847 { 2848 struct net_device *dev = platform_get_drvdata(pdev); 2849 unsigned int hw_q, q; 2850 struct macb *bp = netdev_priv(dev); 2851 struct macb_queue *queue; 2852 int err; 2853 u32 val; 2854 2855 bp->tx_ring_size = DEFAULT_TX_RING_SIZE; 2856 bp->rx_ring_size = DEFAULT_RX_RING_SIZE; 2857 2858 /* set the queue register mapping once for all: queue0 has a special 2859 * register mapping but we don't want to test the queue index then 2860 * compute the corresponding register offset at run time. 
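 * Queue 0 keeps the legacy MACB_ISR/IER/IDR/IMR/TBQP offsets, while
 * queues 1..N use the banked GEM_ISR(hw_q - 1) style registers set up
 * below.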
2861 */ 2862 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { 2863 if (!(bp->queue_mask & (1 << hw_q))) 2864 continue; 2865 2866 queue = &bp->queues[q]; 2867 queue->bp = bp; 2868 if (hw_q) { 2869 queue->ISR = GEM_ISR(hw_q - 1); 2870 queue->IER = GEM_IER(hw_q - 1); 2871 queue->IDR = GEM_IDR(hw_q - 1); 2872 queue->IMR = GEM_IMR(hw_q - 1); 2873 queue->TBQP = GEM_TBQP(hw_q - 1); 2874 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2875 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2876 queue->TBQPH = GEM_TBQPH(hw_q - 1); 2877 #endif 2878 } else { 2879 /* queue0 uses legacy registers */ 2880 queue->ISR = MACB_ISR; 2881 queue->IER = MACB_IER; 2882 queue->IDR = MACB_IDR; 2883 queue->IMR = MACB_IMR; 2884 queue->TBQP = MACB_TBQP; 2885 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2886 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2887 queue->TBQPH = MACB_TBQPH; 2888 #endif 2889 } 2890 2891 /* get irq: here we use the linux queue index, not the hardware 2892 * queue index. the queue irq definitions in the device tree 2893 * must remove the optional gaps that could exist in the 2894 * hardware queue mask. 2895 */ 2896 queue->irq = platform_get_irq(pdev, q); 2897 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, 2898 IRQF_SHARED, dev->name, queue); 2899 if (err) { 2900 dev_err(&pdev->dev, 2901 "Unable to request IRQ %d (error %d)\n", 2902 queue->irq, err); 2903 return err; 2904 } 2905 2906 INIT_WORK(&queue->tx_error_task, macb_tx_error_task); 2907 q++; 2908 } 2909 2910 dev->netdev_ops = &macb_netdev_ops; 2911 netif_napi_add(dev, &bp->napi, macb_poll, 64); 2912 2913 /* setup appropriated routines according to adapter type */ 2914 if (macb_is_gem(bp)) { 2915 bp->max_tx_length = GEM_MAX_TX_LEN; 2916 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; 2917 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; 2918 bp->macbgem_ops.mog_init_rings = gem_init_rings; 2919 bp->macbgem_ops.mog_rx = gem_rx; 2920 dev->ethtool_ops = &gem_ethtool_ops; 2921 } else { 2922 bp->max_tx_length = MACB_MAX_TX_LEN; 2923 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; 2924 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; 2925 bp->macbgem_ops.mog_init_rings = macb_init_rings; 2926 bp->macbgem_ops.mog_rx = macb_rx; 2927 dev->ethtool_ops = &macb_ethtool_ops; 2928 } 2929 2930 /* Set features */ 2931 dev->hw_features = NETIF_F_SG; 2932 2933 /* Check LSO capability */ 2934 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) 2935 dev->hw_features |= MACB_NETIF_LSO; 2936 2937 /* Checksum offload is only available on gem with packet buffer */ 2938 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) 2939 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 2940 if (bp->caps & MACB_CAPS_SG_DISABLED) 2941 dev->hw_features &= ~NETIF_F_SG; 2942 dev->features = dev->hw_features; 2943 2944 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { 2945 val = 0; 2946 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) 2947 val = GEM_BIT(RGMII); 2948 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && 2949 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 2950 val = MACB_BIT(RMII); 2951 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 2952 val = MACB_BIT(MII); 2953 2954 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) 2955 val |= MACB_BIT(CLKEN); 2956 2957 macb_or_gem_writel(bp, USRIO, val); 2958 } 2959 2960 /* Set MII management clock divider */ 2961 val = macb_mdc_clk_div(bp); 2962 val |= macb_dbw(bp); 2963 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) 2964 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 2965 macb_writel(bp, 
NCFGR, val); 2966 2967 return 0; 2968 } 2969 2970 #if defined(CONFIG_OF) 2971 /* 1518 rounded up */ 2972 #define AT91ETHER_MAX_RBUFF_SZ 0x600 2973 /* max number of receive buffers */ 2974 #define AT91ETHER_MAX_RX_DESCR 9 2975 2976 /* Initialize and start the Receiver and Transmit subsystems */ 2977 static int at91ether_start(struct net_device *dev) 2978 { 2979 struct macb *lp = netdev_priv(dev); 2980 struct macb_dma_desc *desc; 2981 dma_addr_t addr; 2982 u32 ctl; 2983 int i; 2984 2985 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, 2986 (AT91ETHER_MAX_RX_DESCR * 2987 macb_dma_desc_get_size(lp)), 2988 &lp->rx_ring_dma, GFP_KERNEL); 2989 if (!lp->rx_ring) 2990 return -ENOMEM; 2991 2992 lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, 2993 AT91ETHER_MAX_RX_DESCR * 2994 AT91ETHER_MAX_RBUFF_SZ, 2995 &lp->rx_buffers_dma, GFP_KERNEL); 2996 if (!lp->rx_buffers) { 2997 dma_free_coherent(&lp->pdev->dev, 2998 AT91ETHER_MAX_RX_DESCR * 2999 macb_dma_desc_get_size(lp), 3000 lp->rx_ring, lp->rx_ring_dma); 3001 lp->rx_ring = NULL; 3002 return -ENOMEM; 3003 } 3004 3005 addr = lp->rx_buffers_dma; 3006 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { 3007 desc = macb_rx_desc(lp, i); 3008 macb_set_addr(lp, desc, addr); 3009 desc->ctrl = 0; 3010 addr += AT91ETHER_MAX_RBUFF_SZ; 3011 } 3012 3013 /* Set the Wrap bit on the last descriptor */ 3014 desc->addr |= MACB_BIT(RX_WRAP); 3015 3016 /* Reset buffer index */ 3017 lp->rx_tail = 0; 3018 3019 /* Program address of descriptor list in Rx Buffer Queue register */ 3020 macb_writel(lp, RBQP, lp->rx_ring_dma); 3021 3022 /* Enable Receive and Transmit */ 3023 ctl = macb_readl(lp, NCR); 3024 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); 3025 3026 return 0; 3027 } 3028 3029 /* Open the ethernet interface */ 3030 static int at91ether_open(struct net_device *dev) 3031 { 3032 struct macb *lp = netdev_priv(dev); 3033 u32 ctl; 3034 int ret; 3035 3036 /* Clear internal statistics */ 3037 ctl = macb_readl(lp, NCR); 3038 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); 3039 3040 macb_set_hwaddr(lp); 3041 3042 ret = at91ether_start(dev); 3043 if (ret) 3044 return ret; 3045 3046 /* Enable MAC interrupts */ 3047 macb_writel(lp, IER, MACB_BIT(RCOMP) | 3048 MACB_BIT(RXUBR) | 3049 MACB_BIT(ISR_TUND) | 3050 MACB_BIT(ISR_RLE) | 3051 MACB_BIT(TCOMP) | 3052 MACB_BIT(ISR_ROVR) | 3053 MACB_BIT(HRESP)); 3054 3055 /* schedule a link state check */ 3056 phy_start(dev->phydev); 3057 3058 netif_start_queue(dev); 3059 3060 return 0; 3061 } 3062 3063 /* Close the interface */ 3064 static int at91ether_close(struct net_device *dev) 3065 { 3066 struct macb *lp = netdev_priv(dev); 3067 u32 ctl; 3068 3069 /* Disable Receiver and Transmitter */ 3070 ctl = macb_readl(lp, NCR); 3071 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); 3072 3073 /* Disable MAC interrupts */ 3074 macb_writel(lp, IDR, MACB_BIT(RCOMP) | 3075 MACB_BIT(RXUBR) | 3076 MACB_BIT(ISR_TUND) | 3077 MACB_BIT(ISR_RLE) | 3078 MACB_BIT(TCOMP) | 3079 MACB_BIT(ISR_ROVR) | 3080 MACB_BIT(HRESP)); 3081 3082 netif_stop_queue(dev); 3083 3084 dma_free_coherent(&lp->pdev->dev, 3085 AT91ETHER_MAX_RX_DESCR * 3086 macb_dma_desc_get_size(lp), 3087 lp->rx_ring, lp->rx_ring_dma); 3088 lp->rx_ring = NULL; 3089 3090 dma_free_coherent(&lp->pdev->dev, 3091 AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ, 3092 lp->rx_buffers, lp->rx_buffers_dma); 3093 lp->rx_buffers = NULL; 3094 3095 return 0; 3096 } 3097 3098 /* Transmit packet */ 3099 static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) 3100 { 3101 struct macb 
*lp = netdev_priv(dev); 3102 3103 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { 3104 netif_stop_queue(dev); 3105 3106 /* Store packet information (to free when Tx completed) */ 3107 lp->skb = skb; 3108 lp->skb_length = skb->len; 3109 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, 3110 DMA_TO_DEVICE); 3111 if (dma_mapping_error(NULL, lp->skb_physaddr)) { 3112 dev_kfree_skb_any(skb); 3113 dev->stats.tx_dropped++; 3114 netdev_err(dev, "%s: DMA mapping error\n", __func__); 3115 return NETDEV_TX_OK; 3116 } 3117 3118 /* Set address of the data in the Transmit Address register */ 3119 macb_writel(lp, TAR, lp->skb_physaddr); 3120 /* Set length of the packet in the Transmit Control register */ 3121 macb_writel(lp, TCR, skb->len); 3122 3123 } else { 3124 netdev_err(dev, "%s called, but device is busy!\n", __func__); 3125 return NETDEV_TX_BUSY; 3126 } 3127 3128 return NETDEV_TX_OK; 3129 } 3130 3131 /* Extract received frames from the buffer descriptors and send them to the upper layers. 3132 * (Called from interrupt context) 3133 */ 3134 static void at91ether_rx(struct net_device *dev) 3135 { 3136 struct macb *lp = netdev_priv(dev); 3137 struct macb_dma_desc *desc; 3138 unsigned char *p_recv; 3139 struct sk_buff *skb; 3140 unsigned int pktlen; 3141 3142 desc = macb_rx_desc(lp, lp->rx_tail); 3143 while (desc->addr & MACB_BIT(RX_USED)) { 3144 p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ; 3145 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl); 3146 skb = netdev_alloc_skb(dev, pktlen + 2); 3147 if (skb) { 3148 skb_reserve(skb, 2); 3149 skb_put_data(skb, p_recv, pktlen); 3150 3151 skb->protocol = eth_type_trans(skb, dev); 3152 dev->stats.rx_packets++; 3153 dev->stats.rx_bytes += pktlen; 3154 netif_rx(skb); 3155 } else { 3156 dev->stats.rx_dropped++; 3157 } 3158 3159 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH)) 3160 dev->stats.multicast++; 3161 3162 /* reset ownership bit */ 3163 desc->addr &= ~MACB_BIT(RX_USED); 3164 3165 /* wrap after last buffer */ 3166 if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) 3167 lp->rx_tail = 0; 3168 else 3169 lp->rx_tail++; 3170 3171 desc = macb_rx_desc(lp, lp->rx_tail); 3172 } 3173 } 3174 3175 /* MAC interrupt handler */ 3176 static irqreturn_t at91ether_interrupt(int irq, void *dev_id) 3177 { 3178 struct net_device *dev = dev_id; 3179 struct macb *lp = netdev_priv(dev); 3180 u32 intstatus, ctl; 3181 3182 /* MAC Interrupt Status register indicates what interrupts are pending. 3183 * It is automatically cleared once read.
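 * RCOMP is handled by at91ether_rx(), TCOMP releases the single
 * in-flight TX skb, and RXUBR triggers the receiver reset work-around
 * below (EMAC Errata section 41.3.1).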
3184 */ 3185 intstatus = macb_readl(lp, ISR); 3186 3187 /* Receive complete */ 3188 if (intstatus & MACB_BIT(RCOMP)) 3189 at91ether_rx(dev); 3190 3191 /* Transmit complete */ 3192 if (intstatus & MACB_BIT(TCOMP)) { 3193 /* The TCOM bit is set even if the transmission failed */ 3194 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) 3195 dev->stats.tx_errors++; 3196 3197 if (lp->skb) { 3198 dev_kfree_skb_irq(lp->skb); 3199 lp->skb = NULL; 3200 dma_unmap_single(NULL, lp->skb_physaddr, 3201 lp->skb_length, DMA_TO_DEVICE); 3202 dev->stats.tx_packets++; 3203 dev->stats.tx_bytes += lp->skb_length; 3204 } 3205 netif_wake_queue(dev); 3206 } 3207 3208 /* Work-around for EMAC Errata section 41.3.1 */ 3209 if (intstatus & MACB_BIT(RXUBR)) { 3210 ctl = macb_readl(lp, NCR); 3211 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); 3212 wmb(); 3213 macb_writel(lp, NCR, ctl | MACB_BIT(RE)); 3214 } 3215 3216 if (intstatus & MACB_BIT(ISR_ROVR)) 3217 netdev_err(dev, "ROVR error\n"); 3218 3219 return IRQ_HANDLED; 3220 } 3221 3222 #ifdef CONFIG_NET_POLL_CONTROLLER 3223 static void at91ether_poll_controller(struct net_device *dev) 3224 { 3225 unsigned long flags; 3226 3227 local_irq_save(flags); 3228 at91ether_interrupt(dev->irq, dev); 3229 local_irq_restore(flags); 3230 } 3231 #endif 3232 3233 static const struct net_device_ops at91ether_netdev_ops = { 3234 .ndo_open = at91ether_open, 3235 .ndo_stop = at91ether_close, 3236 .ndo_start_xmit = at91ether_start_xmit, 3237 .ndo_get_stats = macb_get_stats, 3238 .ndo_set_rx_mode = macb_set_rx_mode, 3239 .ndo_set_mac_address = eth_mac_addr, 3240 .ndo_do_ioctl = macb_ioctl, 3241 .ndo_validate_addr = eth_validate_addr, 3242 #ifdef CONFIG_NET_POLL_CONTROLLER 3243 .ndo_poll_controller = at91ether_poll_controller, 3244 #endif 3245 }; 3246 3247 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, 3248 struct clk **hclk, struct clk **tx_clk, 3249 struct clk **rx_clk) 3250 { 3251 int err; 3252 3253 *hclk = NULL; 3254 *tx_clk = NULL; 3255 *rx_clk = NULL; 3256 3257 *pclk = devm_clk_get(&pdev->dev, "ether_clk"); 3258 if (IS_ERR(*pclk)) 3259 return PTR_ERR(*pclk); 3260 3261 err = clk_prepare_enable(*pclk); 3262 if (err) { 3263 dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); 3264 return err; 3265 } 3266 3267 return 0; 3268 } 3269 3270 static int at91ether_init(struct platform_device *pdev) 3271 { 3272 struct net_device *dev = platform_get_drvdata(pdev); 3273 struct macb *bp = netdev_priv(dev); 3274 int err; 3275 u32 reg; 3276 3277 dev->netdev_ops = &at91ether_netdev_ops; 3278 dev->ethtool_ops = &macb_ethtool_ops; 3279 3280 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 3281 0, dev->name, dev); 3282 if (err) 3283 return err; 3284 3285 macb_writel(bp, NCR, 0); 3286 3287 reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG); 3288 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) 3289 reg |= MACB_BIT(RM9200_RMII); 3290 3291 macb_writel(bp, NCFGR, reg); 3292 3293 return 0; 3294 } 3295 3296 static const struct macb_config at91sam9260_config = { 3297 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 3298 .clk_init = macb_clk_init, 3299 .init = macb_init, 3300 }; 3301 3302 static const struct macb_config pc302gem_config = { 3303 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, 3304 .dma_burst_length = 16, 3305 .clk_init = macb_clk_init, 3306 .init = macb_init, 3307 }; 3308 3309 static const struct macb_config sama5d2_config = { 3310 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 3311 .dma_burst_length = 
16, 3312 .clk_init = macb_clk_init, 3313 .init = macb_init, 3314 }; 3315 3316 static const struct macb_config sama5d3_config = { 3317 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE 3318 | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO, 3319 .dma_burst_length = 16, 3320 .clk_init = macb_clk_init, 3321 .init = macb_init, 3322 .jumbo_max_len = 10240, 3323 }; 3324 3325 static const struct macb_config sama5d4_config = { 3326 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 3327 .dma_burst_length = 4, 3328 .clk_init = macb_clk_init, 3329 .init = macb_init, 3330 }; 3331 3332 static const struct macb_config emac_config = { 3333 .clk_init = at91ether_clk_init, 3334 .init = at91ether_init, 3335 }; 3336 3337 static const struct macb_config np4_config = { 3338 .caps = MACB_CAPS_USRIO_DISABLED, 3339 .clk_init = macb_clk_init, 3340 .init = macb_init, 3341 }; 3342 3343 static const struct macb_config zynqmp_config = { 3344 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | 3345 MACB_CAPS_JUMBO | 3346 MACB_CAPS_GEM_HAS_PTP, 3347 .dma_burst_length = 16, 3348 .clk_init = macb_clk_init, 3349 .init = macb_init, 3350 .jumbo_max_len = 10240, 3351 }; 3352 3353 static const struct macb_config zynq_config = { 3354 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF, 3355 .dma_burst_length = 16, 3356 .clk_init = macb_clk_init, 3357 .init = macb_init, 3358 }; 3359 3360 static const struct of_device_id macb_dt_ids[] = { 3361 { .compatible = "cdns,at32ap7000-macb" }, 3362 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, 3363 { .compatible = "cdns,macb" }, 3364 { .compatible = "cdns,np4-macb", .data = &np4_config }, 3365 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, 3366 { .compatible = "cdns,gem", .data = &pc302gem_config }, 3367 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, 3368 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, 3369 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, 3370 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, 3371 { .compatible = "cdns,emac", .data = &emac_config }, 3372 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, 3373 { .compatible = "cdns,zynq-gem", .data = &zynq_config }, 3374 { /* sentinel */ } 3375 }; 3376 MODULE_DEVICE_TABLE(of, macb_dt_ids); 3377 #endif /* CONFIG_OF */ 3378 3379 static const struct macb_config default_gem_config = { 3380 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | 3381 MACB_CAPS_JUMBO | 3382 MACB_CAPS_GEM_HAS_PTP, 3383 .dma_burst_length = 16, 3384 .clk_init = macb_clk_init, 3385 .init = macb_init, 3386 .jumbo_max_len = 10240, 3387 }; 3388 3389 static int macb_probe(struct platform_device *pdev) 3390 { 3391 const struct macb_config *macb_config = &default_gem_config; 3392 int (*clk_init)(struct platform_device *, struct clk **, 3393 struct clk **, struct clk **, struct clk **) 3394 = macb_config->clk_init; 3395 int (*init)(struct platform_device *) = macb_config->init; 3396 struct device_node *np = pdev->dev.of_node; 3397 struct device_node *phy_node; 3398 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; 3399 unsigned int queue_mask, num_queues; 3400 struct macb_platform_data *pdata; 3401 bool native_io; 3402 struct phy_device *phydev; 3403 struct net_device *dev; 3404 struct resource *regs; 3405 void __iomem *mem; 3406 const char *mac; 3407 struct macb *bp; 3408 int err; 3409 3410 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3411 mem = devm_ioremap_resource(&pdev->dev, regs); 3412 if (IS_ERR(mem)) 
3413 return PTR_ERR(mem); 3414 3415 if (np) { 3416 const struct of_device_id *match; 3417 3418 match = of_match_node(macb_dt_ids, np); 3419 if (match && match->data) { 3420 macb_config = match->data; 3421 clk_init = macb_config->clk_init; 3422 init = macb_config->init; 3423 } 3424 } 3425 3426 err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk); 3427 if (err) 3428 return err; 3429 3430 native_io = hw_is_native_io(mem); 3431 3432 macb_probe_queues(mem, native_io, &queue_mask, &num_queues); 3433 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); 3434 if (!dev) { 3435 err = -ENOMEM; 3436 goto err_disable_clocks; 3437 } 3438 3439 dev->base_addr = regs->start; 3440 3441 SET_NETDEV_DEV(dev, &pdev->dev); 3442 3443 bp = netdev_priv(dev); 3444 bp->pdev = pdev; 3445 bp->dev = dev; 3446 bp->regs = mem; 3447 bp->native_io = native_io; 3448 if (native_io) { 3449 bp->macb_reg_readl = hw_readl_native; 3450 bp->macb_reg_writel = hw_writel_native; 3451 } else { 3452 bp->macb_reg_readl = hw_readl; 3453 bp->macb_reg_writel = hw_writel; 3454 } 3455 bp->num_queues = num_queues; 3456 bp->queue_mask = queue_mask; 3457 if (macb_config) 3458 bp->dma_burst_length = macb_config->dma_burst_length; 3459 bp->pclk = pclk; 3460 bp->hclk = hclk; 3461 bp->tx_clk = tx_clk; 3462 bp->rx_clk = rx_clk; 3463 if (macb_config) 3464 bp->jumbo_max_len = macb_config->jumbo_max_len; 3465 3466 bp->wol = 0; 3467 if (of_get_property(np, "magic-packet", NULL)) 3468 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; 3469 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); 3470 3471 spin_lock_init(&bp->lock); 3472 3473 /* setup capabilities */ 3474 macb_configure_caps(bp, macb_config); 3475 3476 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3477 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { 3478 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); 3479 bp->hw_dma_cap |= HW_DMA_CAP_64B; 3480 } 3481 #endif 3482 platform_set_drvdata(pdev, dev); 3483 3484 dev->irq = platform_get_irq(pdev, 0); 3485 if (dev->irq < 0) { 3486 err = dev->irq; 3487 goto err_out_free_netdev; 3488 } 3489 3490 /* MTU range: 68 - 1500 or 10240 */ 3491 dev->min_mtu = GEM_MTU_MIN_SIZE; 3492 if (bp->caps & MACB_CAPS_JUMBO) 3493 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN; 3494 else 3495 dev->max_mtu = ETH_DATA_LEN; 3496 3497 mac = of_get_mac_address(np); 3498 if (mac) 3499 ether_addr_copy(bp->dev->dev_addr, mac); 3500 else 3501 macb_get_hwaddr(bp); 3502 3503 /* Power up the PHY if there is a GPIO reset */ 3504 phy_node = of_get_next_available_child(np, NULL); 3505 if (phy_node) { 3506 int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0); 3507 3508 if (gpio_is_valid(gpio)) { 3509 bp->reset_gpio = gpio_to_desc(gpio); 3510 gpiod_direction_output(bp->reset_gpio, 1); 3511 } 3512 } 3513 of_node_put(phy_node); 3514 3515 err = of_get_phy_mode(np); 3516 if (err < 0) { 3517 pdata = dev_get_platdata(&pdev->dev); 3518 if (pdata && pdata->is_rmii) 3519 bp->phy_interface = PHY_INTERFACE_MODE_RMII; 3520 else 3521 bp->phy_interface = PHY_INTERFACE_MODE_MII; 3522 } else { 3523 bp->phy_interface = err; 3524 } 3525 3526 /* IP specific init */ 3527 err = init(pdev); 3528 if (err) 3529 goto err_out_free_netdev; 3530 3531 err = macb_mii_init(bp); 3532 if (err) 3533 goto err_out_free_netdev; 3534 3535 phydev = dev->phydev; 3536 3537 netif_carrier_off(dev); 3538 3539 err = register_netdev(dev); 3540 if (err) { 3541 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 3542 goto err_out_unregister_mdio; 3543 } 3544 3545 phy_attached_info(phydev); 3546 3547 netdev_info(dev, "Cadence %s 
rev 0x%08x at 0x%08lx irq %d (%pM)\n", 3548 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), 3549 dev->base_addr, dev->irq, dev->dev_addr); 3550 3551 return 0; 3552 3553 err_out_unregister_mdio: 3554 phy_disconnect(dev->phydev); 3555 mdiobus_unregister(bp->mii_bus); 3556 of_node_put(bp->phy_node); 3557 if (np && of_phy_is_fixed_link(np)) 3558 of_phy_deregister_fixed_link(np); 3559 mdiobus_free(bp->mii_bus); 3560 3561 /* Shutdown the PHY if there is a GPIO reset */ 3562 if (bp->reset_gpio) 3563 gpiod_set_value(bp->reset_gpio, 0); 3564 3565 err_out_free_netdev: 3566 free_netdev(dev); 3567 3568 err_disable_clocks: 3569 clk_disable_unprepare(tx_clk); 3570 clk_disable_unprepare(hclk); 3571 clk_disable_unprepare(pclk); 3572 clk_disable_unprepare(rx_clk); 3573 3574 return err; 3575 } 3576 3577 static int macb_remove(struct platform_device *pdev) 3578 { 3579 struct net_device *dev; 3580 struct macb *bp; 3581 struct device_node *np = pdev->dev.of_node; 3582 3583 dev = platform_get_drvdata(pdev); 3584 3585 if (dev) { 3586 bp = netdev_priv(dev); 3587 if (dev->phydev) 3588 phy_disconnect(dev->phydev); 3589 mdiobus_unregister(bp->mii_bus); 3590 if (np && of_phy_is_fixed_link(np)) 3591 of_phy_deregister_fixed_link(np); 3592 dev->phydev = NULL; 3593 mdiobus_free(bp->mii_bus); 3594 3595 /* Shutdown the PHY if there is a GPIO reset */ 3596 if (bp->reset_gpio) 3597 gpiod_set_value(bp->reset_gpio, 0); 3598 3599 unregister_netdev(dev); 3600 clk_disable_unprepare(bp->tx_clk); 3601 clk_disable_unprepare(bp->hclk); 3602 clk_disable_unprepare(bp->pclk); 3603 clk_disable_unprepare(bp->rx_clk); 3604 of_node_put(bp->phy_node); 3605 free_netdev(dev); 3606 } 3607 3608 return 0; 3609 } 3610 3611 static int __maybe_unused macb_suspend(struct device *dev) 3612 { 3613 struct platform_device *pdev = to_platform_device(dev); 3614 struct net_device *netdev = platform_get_drvdata(pdev); 3615 struct macb *bp = netdev_priv(netdev); 3616 3617 netif_carrier_off(netdev); 3618 netif_device_detach(netdev); 3619 3620 if (bp->wol & MACB_WOL_ENABLED) { 3621 macb_writel(bp, IER, MACB_BIT(WOL)); 3622 macb_writel(bp, WOL, MACB_BIT(MAG)); 3623 enable_irq_wake(bp->queues[0].irq); 3624 } else { 3625 clk_disable_unprepare(bp->tx_clk); 3626 clk_disable_unprepare(bp->hclk); 3627 clk_disable_unprepare(bp->pclk); 3628 clk_disable_unprepare(bp->rx_clk); 3629 } 3630 3631 return 0; 3632 } 3633 3634 static int __maybe_unused macb_resume(struct device *dev) 3635 { 3636 struct platform_device *pdev = to_platform_device(dev); 3637 struct net_device *netdev = platform_get_drvdata(pdev); 3638 struct macb *bp = netdev_priv(netdev); 3639 3640 if (bp->wol & MACB_WOL_ENABLED) { 3641 macb_writel(bp, IDR, MACB_BIT(WOL)); 3642 macb_writel(bp, WOL, 0); 3643 disable_irq_wake(bp->queues[0].irq); 3644 } else { 3645 clk_prepare_enable(bp->pclk); 3646 clk_prepare_enable(bp->hclk); 3647 clk_prepare_enable(bp->tx_clk); 3648 clk_prepare_enable(bp->rx_clk); 3649 } 3650 3651 netif_device_attach(netdev); 3652 3653 return 0; 3654 } 3655 3656 static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume); 3657 3658 static struct platform_driver macb_driver = { 3659 .probe = macb_probe, 3660 .remove = macb_remove, 3661 .driver = { 3662 .name = "macb", 3663 .of_match_table = of_match_ptr(macb_dt_ids), 3664 .pm = &macb_pm_ops, 3665 }, 3666 }; 3667 3668 module_platform_driver(macb_driver); 3669 3670 MODULE_LICENSE("GPL"); 3671 MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); 3672 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 3673 MODULE_ALIAS("platform:macb"); 3674
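
/* A sketch for illustration only (never compiled): it duplicates the logic
 * of hash_bit_value()/hash_get_index() above in a self-contained form to
 * show how the 6-bit multicast hash index described in the hash comment is
 * formed. The function name is hypothetical and not part of the driver.
 */
#if 0
static int example_hash_index(const unsigned char *addr)
{
	int i, j, bitval, hash_index = 0;

	for (j = 0; j < 6; j++) {
		/* hi[j] is the XOR of address bits j, j+6, j+12, ..., j+42 */
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= (addr[(i * 6 + j) / 8] >> ((i * 6 + j) % 8)) & 1;
		hash_index |= bitval << j;
	}
	/* e.g. ff:ff:ff:ff:ff:ff yields 0: each hi[j] XORs eight set bits */
	return hash_index;
}
#endif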