1 /* 2 * Cadence MACB/GEM Ethernet Controller driver 3 * 4 * Copyright (C) 2004-2006 Atmel Corporation 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 #include <linux/clk.h> 13 #include <linux/crc32.h> 14 #include <linux/module.h> 15 #include <linux/moduleparam.h> 16 #include <linux/kernel.h> 17 #include <linux/types.h> 18 #include <linux/circ_buf.h> 19 #include <linux/slab.h> 20 #include <linux/init.h> 21 #include <linux/io.h> 22 #include <linux/gpio.h> 23 #include <linux/gpio/consumer.h> 24 #include <linux/interrupt.h> 25 #include <linux/netdevice.h> 26 #include <linux/etherdevice.h> 27 #include <linux/dma-mapping.h> 28 #include <linux/platform_data/macb.h> 29 #include <linux/platform_device.h> 30 #include <linux/phy.h> 31 #include <linux/of.h> 32 #include <linux/of_device.h> 33 #include <linux/of_gpio.h> 34 #include <linux/of_mdio.h> 35 #include <linux/of_net.h> 36 #include <linux/ip.h> 37 #include <linux/udp.h> 38 #include <linux/tcp.h> 39 #include "macb.h" 40 41 #define MACB_RX_BUFFER_SIZE 128 42 #define RX_BUFFER_MULTIPLE 64 /* bytes */ 43 44 #define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */ 45 #define MIN_RX_RING_SIZE 64 46 #define MAX_RX_RING_SIZE 8192 47 #define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \ 48 * (bp)->rx_ring_size) 49 50 #define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */ 51 #define MIN_TX_RING_SIZE 64 52 #define MAX_TX_RING_SIZE 4096 53 #define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \ 54 * (bp)->tx_ring_size) 55 56 /* level of occupied TX descriptors under which we wake up TX process */ 57 #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4) 58 59 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 60 | MACB_BIT(ISR_ROVR)) 61 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 62 | MACB_BIT(ISR_RLE) \ 63 | MACB_BIT(TXERR)) 64 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 65 66 /* Max length of transmit frame must be a multiple of 8 bytes */ 67 #define MACB_TX_LEN_ALIGN 8 68 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) 69 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) 70 71 #define GEM_MTU_MIN_SIZE ETH_MIN_MTU 72 #define MACB_NETIF_LSO NETIF_F_TSO 73 74 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 75 #define MACB_WOL_ENABLED (0x1 << 1) 76 77 /* Graceful stop timeouts in us. We should allow up to 78 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) 79 */ 80 #define MACB_HALT_TIMEOUT 1230 81 82 /* DMA buffer descriptor might be different size 83 * depends on hardware configuration: 84 * 85 * 1. dma address width 32 bits: 86 * word 1: 32 bit address of Data Buffer 87 * word 2: control 88 * 89 * 2. dma address width 64 bits: 90 * word 1: 32 bit address of Data Buffer 91 * word 2: control 92 * word 3: upper 32 bit address of Data Buffer 93 * word 4: unused 94 * 95 * 3. dma address width 32 bits with hardware timestamping: 96 * word 1: 32 bit address of Data Buffer 97 * word 2: control 98 * word 3: timestamp word 1 99 * word 4: timestamp word 2 100 * 101 * 4. dma address width 64 bits with hardware timestamping: 102 * word 1: 32 bit address of Data Buffer 103 * word 2: control 104 * word 3: upper 32 bit address of Data Buffer 105 * word 4: unused 106 * word 5: timestamp word 1 107 * word 6: timestamp word 2 108 */ 109 static unsigned int macb_dma_desc_get_size(struct macb *bp) 110 { 111 #ifdef MACB_EXT_DESC 112 unsigned int desc_size; 113 114 switch (bp->hw_dma_cap) { 115 case HW_DMA_CAP_64B: 116 desc_size = sizeof(struct macb_dma_desc) 117 + sizeof(struct macb_dma_desc_64); 118 break; 119 case HW_DMA_CAP_PTP: 120 desc_size = sizeof(struct macb_dma_desc) 121 + sizeof(struct macb_dma_desc_ptp); 122 break; 123 case HW_DMA_CAP_64B_PTP: 124 desc_size = sizeof(struct macb_dma_desc) 125 + sizeof(struct macb_dma_desc_64) 126 + sizeof(struct macb_dma_desc_ptp); 127 break; 128 default: 129 desc_size = sizeof(struct macb_dma_desc); 130 } 131 return desc_size; 132 #endif 133 return sizeof(struct macb_dma_desc); 134 } 135 136 static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx) 137 { 138 #ifdef MACB_EXT_DESC 139 switch (bp->hw_dma_cap) { 140 case HW_DMA_CAP_64B: 141 case HW_DMA_CAP_PTP: 142 desc_idx <<= 1; 143 break; 144 case HW_DMA_CAP_64B_PTP: 145 desc_idx *= 3; 146 break; 147 default: 148 break; 149 } 150 #endif 151 return desc_idx; 152 } 153 154 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 155 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc) 156 { 157 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 158 return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc)); 159 return NULL; 160 } 161 #endif 162 163 /* Ring buffer accessors */ 164 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) 165 { 166 return index & (bp->tx_ring_size - 1); 167 } 168 169 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, 170 unsigned int index) 171 { 172 index = macb_tx_ring_wrap(queue->bp, index); 173 index = macb_adj_dma_desc_idx(queue->bp, index); 174 return &queue->tx_ring[index]; 175 } 176 177 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, 178 unsigned int index) 179 { 180 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)]; 181 } 182 183 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index) 184 { 185 dma_addr_t offset; 186 187 offset = macb_tx_ring_wrap(queue->bp, index) * 188 macb_dma_desc_get_size(queue->bp); 189 190 return queue->tx_ring_dma + offset; 191 } 192 193 static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index) 194 { 195 return index & (bp->rx_ring_size - 1); 196 } 197 198 static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index) 199 { 200 index = macb_rx_ring_wrap(queue->bp, index); 201 index = macb_adj_dma_desc_idx(queue->bp, index); 202 return &queue->rx_ring[index]; 203 } 204 205 static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index) 206 { 207 return queue->rx_buffers + queue->bp->rx_buffer_size * 208 macb_rx_ring_wrap(queue->bp, index); 209 } 210 211 /* I/O accessors */ 212 static u32 hw_readl_native(struct macb *bp, int offset) 213 { 214 return __raw_readl(bp->regs + offset); 215 } 216 217 static void hw_writel_native(struct macb *bp, int offset, u32 value) 218 { 219 __raw_writel(value, bp->regs + offset); 220 } 221 222 static u32 hw_readl(struct macb *bp, int offset) 223 { 224 return readl_relaxed(bp->regs + offset); 225 } 226 227 static void hw_writel(struct macb *bp, int offset, u32 value) 228 { 229 writel_relaxed(value, bp->regs + offset); 230 } 231 232 /* Find the CPU endianness by using the loopback bit of NCR register. When the 233 * CPU is in big endian we need to program swapped mode for management 234 * descriptor access. 235 */ 236 static bool hw_is_native_io(void __iomem *addr) 237 { 238 u32 value = MACB_BIT(LLB); 239 240 __raw_writel(value, addr + MACB_NCR); 241 value = __raw_readl(addr + MACB_NCR); 242 243 /* Write 0 back to disable everything */ 244 __raw_writel(0, addr + MACB_NCR); 245 246 return value == MACB_BIT(LLB); 247 } 248 249 static bool hw_is_gem(void __iomem *addr, bool native_io) 250 { 251 u32 id; 252 253 if (native_io) 254 id = __raw_readl(addr + MACB_MID); 255 else 256 id = readl_relaxed(addr + MACB_MID); 257 258 return MACB_BFEXT(IDNUM, id) >= 0x2; 259 } 260 261 static void macb_set_hwaddr(struct macb *bp) 262 { 263 u32 bottom; 264 u16 top; 265 266 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); 267 macb_or_gem_writel(bp, SA1B, bottom); 268 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); 269 macb_or_gem_writel(bp, SA1T, top); 270 271 /* Clear unused address register sets */ 272 macb_or_gem_writel(bp, SA2B, 0); 273 macb_or_gem_writel(bp, SA2T, 0); 274 macb_or_gem_writel(bp, SA3B, 0); 275 macb_or_gem_writel(bp, SA3T, 0); 276 macb_or_gem_writel(bp, SA4B, 0); 277 macb_or_gem_writel(bp, SA4T, 0); 278 } 279 280 static void macb_get_hwaddr(struct macb *bp) 281 { 282 struct macb_platform_data *pdata; 283 u32 bottom; 284 u16 top; 285 u8 addr[6]; 286 int i; 287 288 pdata = dev_get_platdata(&bp->pdev->dev); 289 290 /* Check all 4 address register for valid address */ 291 for (i = 0; i < 4; i++) { 292 bottom = macb_or_gem_readl(bp, SA1B + i * 8); 293 top = macb_or_gem_readl(bp, SA1T + i * 8); 294 295 if (pdata && pdata->rev_eth_addr) { 296 addr[5] = bottom & 0xff; 297 addr[4] = (bottom >> 8) & 0xff; 298 addr[3] = (bottom >> 16) & 0xff; 299 addr[2] = (bottom >> 24) & 0xff; 300 addr[1] = top & 0xff; 301 addr[0] = (top & 0xff00) >> 8; 302 } else { 303 addr[0] = bottom & 0xff; 304 addr[1] = (bottom >> 8) & 0xff; 305 addr[2] = (bottom >> 16) & 0xff; 306 addr[3] = (bottom >> 24) & 0xff; 307 addr[4] = top & 0xff; 308 addr[5] = (top >> 8) & 0xff; 309 } 310 311 if (is_valid_ether_addr(addr)) { 312 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); 313 return; 314 } 315 } 316 317 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); 318 eth_hw_addr_random(bp->dev); 319 } 320 321 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 322 { 323 struct macb *bp = bus->priv; 324 int value; 325 326 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) 327 | MACB_BF(RW, MACB_MAN_READ) 328 | MACB_BF(PHYA, mii_id) 329 | MACB_BF(REGA, regnum) 330 | MACB_BF(CODE, MACB_MAN_CODE))); 331 332 /* wait for end of transfer */ 333 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) 334 cpu_relax(); 335 336 value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); 337 338 return value; 339 } 340 341 static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 342 u16 value) 343 { 344 struct macb *bp = bus->priv; 345 346 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) 347 | MACB_BF(RW, MACB_MAN_WRITE) 348 | MACB_BF(PHYA, mii_id) 349 | MACB_BF(REGA, regnum) 350 | MACB_BF(CODE, MACB_MAN_CODE) 351 | MACB_BF(DATA, value))); 352 353 /* wait for end of transfer */ 354 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) 355 cpu_relax(); 356 357 return 0; 358 } 359 360 /** 361 * macb_set_tx_clk() - Set a clock to a new frequency 362 * @clk Pointer to the clock to change 363 * @rate New frequency in Hz 364 * @dev Pointer to the struct net_device 365 */ 366 static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) 367 { 368 long ferr, rate, rate_rounded; 369 370 if (!clk) 371 return; 372 373 switch (speed) { 374 case SPEED_10: 375 rate = 2500000; 376 break; 377 case SPEED_100: 378 rate = 25000000; 379 break; 380 case SPEED_1000: 381 rate = 125000000; 382 break; 383 default: 384 return; 385 } 386 387 rate_rounded = clk_round_rate(clk, rate); 388 if (rate_rounded < 0) 389 return; 390 391 /* RGMII allows 50 ppm frequency error. Test and warn if this limit 392 * is not satisfied. 393 */ 394 ferr = abs(rate_rounded - rate); 395 ferr = DIV_ROUND_UP(ferr, rate / 100000); 396 if (ferr > 5) 397 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n", 398 rate); 399 400 if (clk_set_rate(clk, rate_rounded)) 401 netdev_err(dev, "adjusting tx_clk failed.\n"); 402 } 403 404 static void macb_handle_link_change(struct net_device *dev) 405 { 406 struct macb *bp = netdev_priv(dev); 407 struct phy_device *phydev = dev->phydev; 408 unsigned long flags; 409 int status_change = 0; 410 411 spin_lock_irqsave(&bp->lock, flags); 412 413 if (phydev->link) { 414 if ((bp->speed != phydev->speed) || 415 (bp->duplex != phydev->duplex)) { 416 u32 reg; 417 418 reg = macb_readl(bp, NCFGR); 419 reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); 420 if (macb_is_gem(bp)) 421 reg &= ~GEM_BIT(GBE); 422 423 if (phydev->duplex) 424 reg |= MACB_BIT(FD); 425 if (phydev->speed == SPEED_100) 426 reg |= MACB_BIT(SPD); 427 if (phydev->speed == SPEED_1000 && 428 bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) 429 reg |= GEM_BIT(GBE); 430 431 macb_or_gem_writel(bp, NCFGR, reg); 432 433 bp->speed = phydev->speed; 434 bp->duplex = phydev->duplex; 435 status_change = 1; 436 } 437 } 438 439 if (phydev->link != bp->link) { 440 if (!phydev->link) { 441 bp->speed = 0; 442 bp->duplex = -1; 443 } 444 bp->link = phydev->link; 445 446 status_change = 1; 447 } 448 449 spin_unlock_irqrestore(&bp->lock, flags); 450 451 if (status_change) { 452 if (phydev->link) { 453 /* Update the TX clock rate if and only if the link is 454 * up and there has been a link change. 455 */ 456 macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); 457 458 netif_carrier_on(dev); 459 netdev_info(dev, "link up (%d/%s)\n", 460 phydev->speed, 461 phydev->duplex == DUPLEX_FULL ? 462 "Full" : "Half"); 463 } else { 464 netif_carrier_off(dev); 465 netdev_info(dev, "link down\n"); 466 } 467 } 468 } 469 470 /* based on au1000_eth. c*/ 471 static int macb_mii_probe(struct net_device *dev) 472 { 473 struct macb *bp = netdev_priv(dev); 474 struct macb_platform_data *pdata; 475 struct phy_device *phydev; 476 struct device_node *np; 477 int phy_irq, ret, i; 478 479 pdata = dev_get_platdata(&bp->pdev->dev); 480 np = bp->pdev->dev.of_node; 481 ret = 0; 482 483 if (np) { 484 if (of_phy_is_fixed_link(np)) { 485 bp->phy_node = of_node_get(np); 486 } else { 487 bp->phy_node = of_parse_phandle(np, "phy-handle", 0); 488 /* fallback to standard phy registration if no 489 * phy-handle was found nor any phy found during 490 * dt phy registration 491 */ 492 if (!bp->phy_node && !phy_find_first(bp->mii_bus)) { 493 for (i = 0; i < PHY_MAX_ADDR; i++) { 494 struct phy_device *phydev; 495 496 phydev = mdiobus_scan(bp->mii_bus, i); 497 if (IS_ERR(phydev) && 498 PTR_ERR(phydev) != -ENODEV) { 499 ret = PTR_ERR(phydev); 500 break; 501 } 502 } 503 504 if (ret) 505 return -ENODEV; 506 } 507 } 508 } 509 510 if (bp->phy_node) { 511 phydev = of_phy_connect(dev, bp->phy_node, 512 &macb_handle_link_change, 0, 513 bp->phy_interface); 514 if (!phydev) 515 return -ENODEV; 516 } else { 517 phydev = phy_find_first(bp->mii_bus); 518 if (!phydev) { 519 netdev_err(dev, "no PHY found\n"); 520 return -ENXIO; 521 } 522 523 if (pdata) { 524 if (gpio_is_valid(pdata->phy_irq_pin)) { 525 ret = devm_gpio_request(&bp->pdev->dev, 526 pdata->phy_irq_pin, "phy int"); 527 if (!ret) { 528 phy_irq = gpio_to_irq(pdata->phy_irq_pin); 529 phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq; 530 } 531 } else { 532 phydev->irq = PHY_POLL; 533 } 534 } 535 536 /* attach the mac to the phy */ 537 ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 538 bp->phy_interface); 539 if (ret) { 540 netdev_err(dev, "Could not attach to PHY\n"); 541 return ret; 542 } 543 } 544 545 /* mask with MAC supported features */ 546 if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) 547 phy_set_max_speed(phydev, SPEED_1000); 548 else 549 phy_set_max_speed(phydev, SPEED_100); 550 551 if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) 552 phy_remove_link_mode(phydev, 553 ETHTOOL_LINK_MODE_1000baseT_Half_BIT); 554 555 bp->link = 0; 556 bp->speed = 0; 557 bp->duplex = -1; 558 559 return 0; 560 } 561 562 static int macb_mii_init(struct macb *bp) 563 { 564 struct macb_platform_data *pdata; 565 struct device_node *np; 566 int err = -ENXIO; 567 568 /* Enable management port */ 569 macb_writel(bp, NCR, MACB_BIT(MPE)); 570 571 bp->mii_bus = mdiobus_alloc(); 572 if (!bp->mii_bus) { 573 err = -ENOMEM; 574 goto err_out; 575 } 576 577 bp->mii_bus->name = "MACB_mii_bus"; 578 bp->mii_bus->read = &macb_mdio_read; 579 bp->mii_bus->write = &macb_mdio_write; 580 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 581 bp->pdev->name, bp->pdev->id); 582 bp->mii_bus->priv = bp; 583 bp->mii_bus->parent = &bp->pdev->dev; 584 pdata = dev_get_platdata(&bp->pdev->dev); 585 586 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); 587 588 np = bp->pdev->dev.of_node; 589 if (np && of_phy_is_fixed_link(np)) { 590 if (of_phy_register_fixed_link(np) < 0) { 591 dev_err(&bp->pdev->dev, 592 "broken fixed-link specification %pOF\n", np); 593 goto err_out_free_mdiobus; 594 } 595 596 err = mdiobus_register(bp->mii_bus); 597 } else { 598 if (pdata) 599 bp->mii_bus->phy_mask = pdata->phy_mask; 600 601 err = of_mdiobus_register(bp->mii_bus, np); 602 } 603 604 if (err) 605 goto err_out_free_fixed_link; 606 607 err = macb_mii_probe(bp->dev); 608 if (err) 609 goto err_out_unregister_bus; 610 611 return 0; 612 613 err_out_unregister_bus: 614 mdiobus_unregister(bp->mii_bus); 615 err_out_free_fixed_link: 616 if (np && of_phy_is_fixed_link(np)) 617 of_phy_deregister_fixed_link(np); 618 err_out_free_mdiobus: 619 of_node_put(bp->phy_node); 620 mdiobus_free(bp->mii_bus); 621 err_out: 622 return err; 623 } 624 625 static void macb_update_stats(struct macb *bp) 626 { 627 u32 *p = &bp->hw_stats.macb.rx_pause_frames; 628 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; 629 int offset = MACB_PFR; 630 631 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 632 633 for (; p < end; p++, offset += 4) 634 *p += bp->macb_reg_readl(bp, offset); 635 } 636 637 static int macb_halt_tx(struct macb *bp) 638 { 639 unsigned long halt_time, timeout; 640 u32 status; 641 642 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); 643 644 timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT); 645 do { 646 halt_time = jiffies; 647 status = macb_readl(bp, TSR); 648 if (!(status & MACB_BIT(TGO))) 649 return 0; 650 651 udelay(250); 652 } while (time_before(halt_time, timeout)); 653 654 return -ETIMEDOUT; 655 } 656 657 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) 658 { 659 if (tx_skb->mapping) { 660 if (tx_skb->mapped_as_page) 661 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, 662 tx_skb->size, DMA_TO_DEVICE); 663 else 664 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, 665 tx_skb->size, DMA_TO_DEVICE); 666 tx_skb->mapping = 0; 667 } 668 669 if (tx_skb->skb) { 670 dev_kfree_skb_any(tx_skb->skb); 671 tx_skb->skb = NULL; 672 } 673 } 674 675 static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) 676 { 677 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 678 struct macb_dma_desc_64 *desc_64; 679 680 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 681 desc_64 = macb_64b_desc(bp, desc); 682 desc_64->addrh = upper_32_bits(addr); 683 } 684 #endif 685 desc->addr = lower_32_bits(addr); 686 } 687 688 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) 689 { 690 dma_addr_t addr = 0; 691 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 692 struct macb_dma_desc_64 *desc_64; 693 694 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 695 desc_64 = macb_64b_desc(bp, desc); 696 addr = ((u64)(desc_64->addrh) << 32); 697 } 698 #endif 699 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 700 return addr; 701 } 702 703 static void macb_tx_error_task(struct work_struct *work) 704 { 705 struct macb_queue *queue = container_of(work, struct macb_queue, 706 tx_error_task); 707 struct macb *bp = queue->bp; 708 struct macb_tx_skb *tx_skb; 709 struct macb_dma_desc *desc; 710 struct sk_buff *skb; 711 unsigned int tail; 712 unsigned long flags; 713 714 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", 715 (unsigned int)(queue - bp->queues), 716 queue->tx_tail, queue->tx_head); 717 718 /* Prevent the queue IRQ handlers from running: each of them may call 719 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue(). 720 * As explained below, we have to halt the transmission before updating 721 * TBQP registers so we call netif_tx_stop_all_queues() to notify the 722 * network engine about the macb/gem being halted. 723 */ 724 spin_lock_irqsave(&bp->lock, flags); 725 726 /* Make sure nobody is trying to queue up new packets */ 727 netif_tx_stop_all_queues(bp->dev); 728 729 /* Stop transmission now 730 * (in case we have just queued new packets) 731 * macb/gem must be halted to write TBQP register 732 */ 733 if (macb_halt_tx(bp)) 734 /* Just complain for now, reinitializing TX path can be good */ 735 netdev_err(bp->dev, "BUG: halt tx timed out\n"); 736 737 /* Treat frames in TX queue including the ones that caused the error. 738 * Free transmit buffers in upper layer. 739 */ 740 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { 741 u32 ctrl; 742 743 desc = macb_tx_desc(queue, tail); 744 ctrl = desc->ctrl; 745 tx_skb = macb_tx_skb(queue, tail); 746 skb = tx_skb->skb; 747 748 if (ctrl & MACB_BIT(TX_USED)) { 749 /* skb is set for the last buffer of the frame */ 750 while (!skb) { 751 macb_tx_unmap(bp, tx_skb); 752 tail++; 753 tx_skb = macb_tx_skb(queue, tail); 754 skb = tx_skb->skb; 755 } 756 757 /* ctrl still refers to the first buffer descriptor 758 * since it's the only one written back by the hardware 759 */ 760 if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) { 761 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", 762 macb_tx_ring_wrap(bp, tail), 763 skb->data); 764 bp->dev->stats.tx_packets++; 765 queue->stats.tx_packets++; 766 bp->dev->stats.tx_bytes += skb->len; 767 queue->stats.tx_bytes += skb->len; 768 } 769 } else { 770 /* "Buffers exhausted mid-frame" errors may only happen 771 * if the driver is buggy, so complain loudly about 772 * those. Statistics are updated by hardware. 773 */ 774 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) 775 netdev_err(bp->dev, 776 "BUG: TX buffers exhausted mid-frame\n"); 777 778 desc->ctrl = ctrl | MACB_BIT(TX_USED); 779 } 780 781 macb_tx_unmap(bp, tx_skb); 782 } 783 784 /* Set end of TX queue */ 785 desc = macb_tx_desc(queue, 0); 786 macb_set_addr(bp, desc, 0); 787 desc->ctrl = MACB_BIT(TX_USED); 788 789 /* Make descriptor updates visible to hardware */ 790 wmb(); 791 792 /* Reinitialize the TX desc queue */ 793 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); 794 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 795 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 796 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); 797 #endif 798 /* Make TX ring reflect state of hardware */ 799 queue->tx_head = 0; 800 queue->tx_tail = 0; 801 802 /* Housework before enabling TX IRQ */ 803 macb_writel(bp, TSR, macb_readl(bp, TSR)); 804 queue_writel(queue, IER, MACB_TX_INT_FLAGS); 805 806 /* Now we are ready to start transmission again */ 807 netif_tx_start_all_queues(bp->dev); 808 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 809 810 spin_unlock_irqrestore(&bp->lock, flags); 811 } 812 813 static void macb_tx_interrupt(struct macb_queue *queue) 814 { 815 unsigned int tail; 816 unsigned int head; 817 u32 status; 818 struct macb *bp = queue->bp; 819 u16 queue_index = queue - bp->queues; 820 821 status = macb_readl(bp, TSR); 822 macb_writel(bp, TSR, status); 823 824 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 825 queue_writel(queue, ISR, MACB_BIT(TCOMP)); 826 827 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", 828 (unsigned long)status); 829 830 head = queue->tx_head; 831 for (tail = queue->tx_tail; tail != head; tail++) { 832 struct macb_tx_skb *tx_skb; 833 struct sk_buff *skb; 834 struct macb_dma_desc *desc; 835 u32 ctrl; 836 837 desc = macb_tx_desc(queue, tail); 838 839 /* Make hw descriptor updates visible to CPU */ 840 rmb(); 841 842 ctrl = desc->ctrl; 843 844 /* TX_USED bit is only set by hardware on the very first buffer 845 * descriptor of the transmitted frame. 846 */ 847 if (!(ctrl & MACB_BIT(TX_USED))) 848 break; 849 850 /* Process all buffers of the current transmitted frame */ 851 for (;; tail++) { 852 tx_skb = macb_tx_skb(queue, tail); 853 skb = tx_skb->skb; 854 855 /* First, update TX stats if needed */ 856 if (skb) { 857 if (gem_ptp_do_txstamp(queue, skb, desc) == 0) { 858 /* skb now belongs to timestamp buffer 859 * and will be removed later 860 */ 861 tx_skb->skb = NULL; 862 } 863 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", 864 macb_tx_ring_wrap(bp, tail), 865 skb->data); 866 bp->dev->stats.tx_packets++; 867 queue->stats.tx_packets++; 868 bp->dev->stats.tx_bytes += skb->len; 869 queue->stats.tx_bytes += skb->len; 870 } 871 872 /* Now we can safely release resources */ 873 macb_tx_unmap(bp, tx_skb); 874 875 /* skb is set only for the last buffer of the frame. 876 * WARNING: at this point skb has been freed by 877 * macb_tx_unmap(). 878 */ 879 if (skb) 880 break; 881 } 882 } 883 884 queue->tx_tail = tail; 885 if (__netif_subqueue_stopped(bp->dev, queue_index) && 886 CIRC_CNT(queue->tx_head, queue->tx_tail, 887 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) 888 netif_wake_subqueue(bp->dev, queue_index); 889 } 890 891 static void gem_rx_refill(struct macb_queue *queue) 892 { 893 unsigned int entry; 894 struct sk_buff *skb; 895 dma_addr_t paddr; 896 struct macb *bp = queue->bp; 897 struct macb_dma_desc *desc; 898 899 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail, 900 bp->rx_ring_size) > 0) { 901 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); 902 903 /* Make hw descriptor updates visible to CPU */ 904 rmb(); 905 906 queue->rx_prepared_head++; 907 desc = macb_rx_desc(queue, entry); 908 909 if (!queue->rx_skbuff[entry]) { 910 /* allocate sk_buff for this free entry in ring */ 911 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); 912 if (unlikely(!skb)) { 913 netdev_err(bp->dev, 914 "Unable to allocate sk_buff\n"); 915 break; 916 } 917 918 /* now fill corresponding descriptor entry */ 919 paddr = dma_map_single(&bp->pdev->dev, skb->data, 920 bp->rx_buffer_size, 921 DMA_FROM_DEVICE); 922 if (dma_mapping_error(&bp->pdev->dev, paddr)) { 923 dev_kfree_skb(skb); 924 break; 925 } 926 927 queue->rx_skbuff[entry] = skb; 928 929 if (entry == bp->rx_ring_size - 1) 930 paddr |= MACB_BIT(RX_WRAP); 931 macb_set_addr(bp, desc, paddr); 932 desc->ctrl = 0; 933 934 /* properly align Ethernet header */ 935 skb_reserve(skb, NET_IP_ALIGN); 936 } else { 937 desc->addr &= ~MACB_BIT(RX_USED); 938 desc->ctrl = 0; 939 } 940 } 941 942 /* Make descriptor updates visible to hardware */ 943 wmb(); 944 945 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", 946 queue, queue->rx_prepared_head, queue->rx_tail); 947 } 948 949 /* Mark DMA descriptors from begin up to and not including end as unused */ 950 static void discard_partial_frame(struct macb_queue *queue, unsigned int begin, 951 unsigned int end) 952 { 953 unsigned int frag; 954 955 for (frag = begin; frag != end; frag++) { 956 struct macb_dma_desc *desc = macb_rx_desc(queue, frag); 957 958 desc->addr &= ~MACB_BIT(RX_USED); 959 } 960 961 /* Make descriptor updates visible to hardware */ 962 wmb(); 963 964 /* When this happens, the hardware stats registers for 965 * whatever caused this is updated, so we don't have to record 966 * anything. 967 */ 968 } 969 970 static int gem_rx(struct macb_queue *queue, int budget) 971 { 972 struct macb *bp = queue->bp; 973 unsigned int len; 974 unsigned int entry; 975 struct sk_buff *skb; 976 struct macb_dma_desc *desc; 977 int count = 0; 978 979 while (count < budget) { 980 u32 ctrl; 981 dma_addr_t addr; 982 bool rxused; 983 984 entry = macb_rx_ring_wrap(bp, queue->rx_tail); 985 desc = macb_rx_desc(queue, entry); 986 987 /* Make hw descriptor updates visible to CPU */ 988 rmb(); 989 990 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; 991 addr = macb_get_addr(bp, desc); 992 ctrl = desc->ctrl; 993 994 if (!rxused) 995 break; 996 997 queue->rx_tail++; 998 count++; 999 1000 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { 1001 netdev_err(bp->dev, 1002 "not whole frame pointed by descriptor\n"); 1003 bp->dev->stats.rx_dropped++; 1004 queue->stats.rx_dropped++; 1005 break; 1006 } 1007 skb = queue->rx_skbuff[entry]; 1008 if (unlikely(!skb)) { 1009 netdev_err(bp->dev, 1010 "inconsistent Rx descriptor chain\n"); 1011 bp->dev->stats.rx_dropped++; 1012 queue->stats.rx_dropped++; 1013 break; 1014 } 1015 /* now everything is ready for receiving packet */ 1016 queue->rx_skbuff[entry] = NULL; 1017 len = ctrl & bp->rx_frm_len_mask; 1018 1019 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); 1020 1021 skb_put(skb, len); 1022 dma_unmap_single(&bp->pdev->dev, addr, 1023 bp->rx_buffer_size, DMA_FROM_DEVICE); 1024 1025 skb->protocol = eth_type_trans(skb, bp->dev); 1026 skb_checksum_none_assert(skb); 1027 if (bp->dev->features & NETIF_F_RXCSUM && 1028 !(bp->dev->flags & IFF_PROMISC) && 1029 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK) 1030 skb->ip_summed = CHECKSUM_UNNECESSARY; 1031 1032 bp->dev->stats.rx_packets++; 1033 queue->stats.rx_packets++; 1034 bp->dev->stats.rx_bytes += skb->len; 1035 queue->stats.rx_bytes += skb->len; 1036 1037 gem_ptp_do_rxstamp(bp, skb, desc); 1038 1039 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 1040 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 1041 skb->len, skb->csum); 1042 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, 1043 skb_mac_header(skb), 16, true); 1044 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, 1045 skb->data, 32, true); 1046 #endif 1047 1048 netif_receive_skb(skb); 1049 } 1050 1051 gem_rx_refill(queue); 1052 1053 return count; 1054 } 1055 1056 static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag, 1057 unsigned int last_frag) 1058 { 1059 unsigned int len; 1060 unsigned int frag; 1061 unsigned int offset; 1062 struct sk_buff *skb; 1063 struct macb_dma_desc *desc; 1064 struct macb *bp = queue->bp; 1065 1066 desc = macb_rx_desc(queue, last_frag); 1067 len = desc->ctrl & bp->rx_frm_len_mask; 1068 1069 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", 1070 macb_rx_ring_wrap(bp, first_frag), 1071 macb_rx_ring_wrap(bp, last_frag), len); 1072 1073 /* The ethernet header starts NET_IP_ALIGN bytes into the 1074 * first buffer. Since the header is 14 bytes, this makes the 1075 * payload word-aligned. 1076 * 1077 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy 1078 * the two padding bytes into the skb so that we avoid hitting 1079 * the slowpath in memcpy(), and pull them off afterwards. 1080 */ 1081 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); 1082 if (!skb) { 1083 bp->dev->stats.rx_dropped++; 1084 for (frag = first_frag; ; frag++) { 1085 desc = macb_rx_desc(queue, frag); 1086 desc->addr &= ~MACB_BIT(RX_USED); 1087 if (frag == last_frag) 1088 break; 1089 } 1090 1091 /* Make descriptor updates visible to hardware */ 1092 wmb(); 1093 1094 return 1; 1095 } 1096 1097 offset = 0; 1098 len += NET_IP_ALIGN; 1099 skb_checksum_none_assert(skb); 1100 skb_put(skb, len); 1101 1102 for (frag = first_frag; ; frag++) { 1103 unsigned int frag_len = bp->rx_buffer_size; 1104 1105 if (offset + frag_len > len) { 1106 if (unlikely(frag != last_frag)) { 1107 dev_kfree_skb_any(skb); 1108 return -1; 1109 } 1110 frag_len = len - offset; 1111 } 1112 skb_copy_to_linear_data_offset(skb, offset, 1113 macb_rx_buffer(queue, frag), 1114 frag_len); 1115 offset += bp->rx_buffer_size; 1116 desc = macb_rx_desc(queue, frag); 1117 desc->addr &= ~MACB_BIT(RX_USED); 1118 1119 if (frag == last_frag) 1120 break; 1121 } 1122 1123 /* Make descriptor updates visible to hardware */ 1124 wmb(); 1125 1126 __skb_pull(skb, NET_IP_ALIGN); 1127 skb->protocol = eth_type_trans(skb, bp->dev); 1128 1129 bp->dev->stats.rx_packets++; 1130 bp->dev->stats.rx_bytes += skb->len; 1131 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 1132 skb->len, skb->csum); 1133 netif_receive_skb(skb); 1134 1135 return 0; 1136 } 1137 1138 static inline void macb_init_rx_ring(struct macb_queue *queue) 1139 { 1140 struct macb *bp = queue->bp; 1141 dma_addr_t addr; 1142 struct macb_dma_desc *desc = NULL; 1143 int i; 1144 1145 addr = queue->rx_buffers_dma; 1146 for (i = 0; i < bp->rx_ring_size; i++) { 1147 desc = macb_rx_desc(queue, i); 1148 macb_set_addr(bp, desc, addr); 1149 desc->ctrl = 0; 1150 addr += bp->rx_buffer_size; 1151 } 1152 desc->addr |= MACB_BIT(RX_WRAP); 1153 queue->rx_tail = 0; 1154 } 1155 1156 static int macb_rx(struct macb_queue *queue, int budget) 1157 { 1158 struct macb *bp = queue->bp; 1159 bool reset_rx_queue = false; 1160 int received = 0; 1161 unsigned int tail; 1162 int first_frag = -1; 1163 1164 for (tail = queue->rx_tail; budget > 0; tail++) { 1165 struct macb_dma_desc *desc = macb_rx_desc(queue, tail); 1166 u32 ctrl; 1167 1168 /* Make hw descriptor updates visible to CPU */ 1169 rmb(); 1170 1171 ctrl = desc->ctrl; 1172 1173 if (!(desc->addr & MACB_BIT(RX_USED))) 1174 break; 1175 1176 if (ctrl & MACB_BIT(RX_SOF)) { 1177 if (first_frag != -1) 1178 discard_partial_frame(queue, first_frag, tail); 1179 first_frag = tail; 1180 } 1181 1182 if (ctrl & MACB_BIT(RX_EOF)) { 1183 int dropped; 1184 1185 if (unlikely(first_frag == -1)) { 1186 reset_rx_queue = true; 1187 continue; 1188 } 1189 1190 dropped = macb_rx_frame(queue, first_frag, tail); 1191 first_frag = -1; 1192 if (unlikely(dropped < 0)) { 1193 reset_rx_queue = true; 1194 continue; 1195 } 1196 if (!dropped) { 1197 received++; 1198 budget--; 1199 } 1200 } 1201 } 1202 1203 if (unlikely(reset_rx_queue)) { 1204 unsigned long flags; 1205 u32 ctrl; 1206 1207 netdev_err(bp->dev, "RX queue corruption: reset it\n"); 1208 1209 spin_lock_irqsave(&bp->lock, flags); 1210 1211 ctrl = macb_readl(bp, NCR); 1212 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1213 1214 macb_init_rx_ring(queue); 1215 queue_writel(queue, RBQP, queue->rx_ring_dma); 1216 1217 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1218 1219 spin_unlock_irqrestore(&bp->lock, flags); 1220 return received; 1221 } 1222 1223 if (first_frag != -1) 1224 queue->rx_tail = first_frag; 1225 else 1226 queue->rx_tail = tail; 1227 1228 return received; 1229 } 1230 1231 static int macb_poll(struct napi_struct *napi, int budget) 1232 { 1233 struct macb_queue *queue = container_of(napi, struct macb_queue, napi); 1234 struct macb *bp = queue->bp; 1235 int work_done; 1236 u32 status; 1237 1238 status = macb_readl(bp, RSR); 1239 macb_writel(bp, RSR, status); 1240 1241 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", 1242 (unsigned long)status, budget); 1243 1244 work_done = bp->macbgem_ops.mog_rx(queue, budget); 1245 if (work_done < budget) { 1246 napi_complete_done(napi, work_done); 1247 1248 /* Packets received while interrupts were disabled */ 1249 status = macb_readl(bp, RSR); 1250 if (status) { 1251 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1252 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1253 napi_reschedule(napi); 1254 } else { 1255 queue_writel(queue, IER, MACB_RX_INT_FLAGS); 1256 } 1257 } 1258 1259 /* TODO: Handle errors */ 1260 1261 return work_done; 1262 } 1263 1264 static void macb_hresp_error_task(unsigned long data) 1265 { 1266 struct macb *bp = (struct macb *)data; 1267 struct net_device *dev = bp->dev; 1268 struct macb_queue *queue = bp->queues; 1269 unsigned int q; 1270 u32 ctrl; 1271 1272 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1273 queue_writel(queue, IDR, MACB_RX_INT_FLAGS | 1274 MACB_TX_INT_FLAGS | 1275 MACB_BIT(HRESP)); 1276 } 1277 ctrl = macb_readl(bp, NCR); 1278 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); 1279 macb_writel(bp, NCR, ctrl); 1280 1281 netif_tx_stop_all_queues(dev); 1282 netif_carrier_off(dev); 1283 1284 bp->macbgem_ops.mog_init_rings(bp); 1285 1286 /* Initialize TX and RX buffers */ 1287 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1288 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); 1289 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1290 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 1291 queue_writel(queue, RBQPH, 1292 upper_32_bits(queue->rx_ring_dma)); 1293 #endif 1294 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); 1295 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1296 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 1297 queue_writel(queue, TBQPH, 1298 upper_32_bits(queue->tx_ring_dma)); 1299 #endif 1300 1301 /* Enable interrupts */ 1302 queue_writel(queue, IER, 1303 MACB_RX_INT_FLAGS | 1304 MACB_TX_INT_FLAGS | 1305 MACB_BIT(HRESP)); 1306 } 1307 1308 ctrl |= MACB_BIT(RE) | MACB_BIT(TE); 1309 macb_writel(bp, NCR, ctrl); 1310 1311 netif_carrier_on(dev); 1312 netif_tx_start_all_queues(dev); 1313 } 1314 1315 static irqreturn_t macb_interrupt(int irq, void *dev_id) 1316 { 1317 struct macb_queue *queue = dev_id; 1318 struct macb *bp = queue->bp; 1319 struct net_device *dev = bp->dev; 1320 u32 status, ctrl; 1321 1322 status = queue_readl(queue, ISR); 1323 1324 if (unlikely(!status)) 1325 return IRQ_NONE; 1326 1327 spin_lock(&bp->lock); 1328 1329 while (status) { 1330 /* close possible race with dev_close */ 1331 if (unlikely(!netif_running(dev))) { 1332 queue_writel(queue, IDR, -1); 1333 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1334 queue_writel(queue, ISR, -1); 1335 break; 1336 } 1337 1338 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", 1339 (unsigned int)(queue - bp->queues), 1340 (unsigned long)status); 1341 1342 if (status & MACB_RX_INT_FLAGS) { 1343 /* There's no point taking any more interrupts 1344 * until we have processed the buffers. The 1345 * scheduling call may fail if the poll routine 1346 * is already scheduled, so disable interrupts 1347 * now. 1348 */ 1349 queue_writel(queue, IDR, MACB_RX_INT_FLAGS); 1350 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1351 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1352 1353 if (napi_schedule_prep(&queue->napi)) { 1354 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); 1355 __napi_schedule(&queue->napi); 1356 } 1357 } 1358 1359 if (unlikely(status & (MACB_TX_ERR_FLAGS))) { 1360 queue_writel(queue, IDR, MACB_TX_INT_FLAGS); 1361 schedule_work(&queue->tx_error_task); 1362 1363 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1364 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS); 1365 1366 break; 1367 } 1368 1369 if (status & MACB_BIT(TCOMP)) 1370 macb_tx_interrupt(queue); 1371 1372 /* Link change detection isn't possible with RMII, so we'll 1373 * add that if/when we get our hands on a full-blown MII PHY. 1374 */ 1375 1376 /* There is a hardware issue under heavy load where DMA can 1377 * stop, this causes endless "used buffer descriptor read" 1378 * interrupts but it can be cleared by re-enabling RX. See 1379 * the at91 manual, section 41.3.1 or the Zynq manual 1380 * section 16.7.4 for details. 1381 */ 1382 if (status & MACB_BIT(RXUBR)) { 1383 ctrl = macb_readl(bp, NCR); 1384 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1385 wmb(); 1386 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1387 1388 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1389 queue_writel(queue, ISR, MACB_BIT(RXUBR)); 1390 } 1391 1392 if (status & MACB_BIT(ISR_ROVR)) { 1393 /* We missed at least one packet */ 1394 if (macb_is_gem(bp)) 1395 bp->hw_stats.gem.rx_overruns++; 1396 else 1397 bp->hw_stats.macb.rx_overruns++; 1398 1399 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1400 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); 1401 } 1402 1403 if (status & MACB_BIT(HRESP)) { 1404 tasklet_schedule(&bp->hresp_err_tasklet); 1405 netdev_err(dev, "DMA bus error: HRESP not OK\n"); 1406 1407 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1408 queue_writel(queue, ISR, MACB_BIT(HRESP)); 1409 } 1410 status = queue_readl(queue, ISR); 1411 } 1412 1413 spin_unlock(&bp->lock); 1414 1415 return IRQ_HANDLED; 1416 } 1417 1418 #ifdef CONFIG_NET_POLL_CONTROLLER 1419 /* Polling receive - used by netconsole and other diagnostic tools 1420 * to allow network i/o with interrupts disabled. 1421 */ 1422 static void macb_poll_controller(struct net_device *dev) 1423 { 1424 struct macb *bp = netdev_priv(dev); 1425 struct macb_queue *queue; 1426 unsigned long flags; 1427 unsigned int q; 1428 1429 local_irq_save(flags); 1430 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 1431 macb_interrupt(dev->irq, queue); 1432 local_irq_restore(flags); 1433 } 1434 #endif 1435 1436 static unsigned int macb_tx_map(struct macb *bp, 1437 struct macb_queue *queue, 1438 struct sk_buff *skb, 1439 unsigned int hdrlen) 1440 { 1441 dma_addr_t mapping; 1442 unsigned int len, entry, i, tx_head = queue->tx_head; 1443 struct macb_tx_skb *tx_skb = NULL; 1444 struct macb_dma_desc *desc; 1445 unsigned int offset, size, count = 0; 1446 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; 1447 unsigned int eof = 1, mss_mfs = 0; 1448 u32 ctrl, lso_ctrl = 0, seq_ctrl = 0; 1449 1450 /* LSO */ 1451 if (skb_shinfo(skb)->gso_size != 0) { 1452 if (ip_hdr(skb)->protocol == IPPROTO_UDP) 1453 /* UDP - UFO */ 1454 lso_ctrl = MACB_LSO_UFO_ENABLE; 1455 else 1456 /* TCP - TSO */ 1457 lso_ctrl = MACB_LSO_TSO_ENABLE; 1458 } 1459 1460 /* First, map non-paged data */ 1461 len = skb_headlen(skb); 1462 1463 /* first buffer length */ 1464 size = hdrlen; 1465 1466 offset = 0; 1467 while (len) { 1468 entry = macb_tx_ring_wrap(bp, tx_head); 1469 tx_skb = &queue->tx_skb[entry]; 1470 1471 mapping = dma_map_single(&bp->pdev->dev, 1472 skb->data + offset, 1473 size, DMA_TO_DEVICE); 1474 if (dma_mapping_error(&bp->pdev->dev, mapping)) 1475 goto dma_error; 1476 1477 /* Save info to properly release resources */ 1478 tx_skb->skb = NULL; 1479 tx_skb->mapping = mapping; 1480 tx_skb->size = size; 1481 tx_skb->mapped_as_page = false; 1482 1483 len -= size; 1484 offset += size; 1485 count++; 1486 tx_head++; 1487 1488 size = min(len, bp->max_tx_length); 1489 } 1490 1491 /* Then, map paged data from fragments */ 1492 for (f = 0; f < nr_frags; f++) { 1493 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 1494 1495 len = skb_frag_size(frag); 1496 offset = 0; 1497 while (len) { 1498 size = min(len, bp->max_tx_length); 1499 entry = macb_tx_ring_wrap(bp, tx_head); 1500 tx_skb = &queue->tx_skb[entry]; 1501 1502 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 1503 offset, size, DMA_TO_DEVICE); 1504 if (dma_mapping_error(&bp->pdev->dev, mapping)) 1505 goto dma_error; 1506 1507 /* Save info to properly release resources */ 1508 tx_skb->skb = NULL; 1509 tx_skb->mapping = mapping; 1510 tx_skb->size = size; 1511 tx_skb->mapped_as_page = true; 1512 1513 len -= size; 1514 offset += size; 1515 count++; 1516 tx_head++; 1517 } 1518 } 1519 1520 /* Should never happen */ 1521 if (unlikely(!tx_skb)) { 1522 netdev_err(bp->dev, "BUG! empty skb!\n"); 1523 return 0; 1524 } 1525 1526 /* This is the last buffer of the frame: save socket buffer */ 1527 tx_skb->skb = skb; 1528 1529 /* Update TX ring: update buffer descriptors in reverse order 1530 * to avoid race condition 1531 */ 1532 1533 /* Set 'TX_USED' bit in buffer descriptor at tx_head position 1534 * to set the end of TX queue 1535 */ 1536 i = tx_head; 1537 entry = macb_tx_ring_wrap(bp, i); 1538 ctrl = MACB_BIT(TX_USED); 1539 desc = macb_tx_desc(queue, entry); 1540 desc->ctrl = ctrl; 1541 1542 if (lso_ctrl) { 1543 if (lso_ctrl == MACB_LSO_UFO_ENABLE) 1544 /* include header and FCS in value given to h/w */ 1545 mss_mfs = skb_shinfo(skb)->gso_size + 1546 skb_transport_offset(skb) + 1547 ETH_FCS_LEN; 1548 else /* TSO */ { 1549 mss_mfs = skb_shinfo(skb)->gso_size; 1550 /* TCP Sequence Number Source Select 1551 * can be set only for TSO 1552 */ 1553 seq_ctrl = 0; 1554 } 1555 } 1556 1557 do { 1558 i--; 1559 entry = macb_tx_ring_wrap(bp, i); 1560 tx_skb = &queue->tx_skb[entry]; 1561 desc = macb_tx_desc(queue, entry); 1562 1563 ctrl = (u32)tx_skb->size; 1564 if (eof) { 1565 ctrl |= MACB_BIT(TX_LAST); 1566 eof = 0; 1567 } 1568 if (unlikely(entry == (bp->tx_ring_size - 1))) 1569 ctrl |= MACB_BIT(TX_WRAP); 1570 1571 /* First descriptor is header descriptor */ 1572 if (i == queue->tx_head) { 1573 ctrl |= MACB_BF(TX_LSO, lso_ctrl); 1574 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl); 1575 if ((bp->dev->features & NETIF_F_HW_CSUM) && 1576 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl) 1577 ctrl |= MACB_BIT(TX_NOCRC); 1578 } else 1579 /* Only set MSS/MFS on payload descriptors 1580 * (second or later descriptor) 1581 */ 1582 ctrl |= MACB_BF(MSS_MFS, mss_mfs); 1583 1584 /* Set TX buffer descriptor */ 1585 macb_set_addr(bp, desc, tx_skb->mapping); 1586 /* desc->addr must be visible to hardware before clearing 1587 * 'TX_USED' bit in desc->ctrl. 1588 */ 1589 wmb(); 1590 desc->ctrl = ctrl; 1591 } while (i != queue->tx_head); 1592 1593 queue->tx_head = tx_head; 1594 1595 return count; 1596 1597 dma_error: 1598 netdev_err(bp->dev, "TX DMA map failed\n"); 1599 1600 for (i = queue->tx_head; i != tx_head; i++) { 1601 tx_skb = macb_tx_skb(queue, i); 1602 1603 macb_tx_unmap(bp, tx_skb); 1604 } 1605 1606 return 0; 1607 } 1608 1609 static netdev_features_t macb_features_check(struct sk_buff *skb, 1610 struct net_device *dev, 1611 netdev_features_t features) 1612 { 1613 unsigned int nr_frags, f; 1614 unsigned int hdrlen; 1615 1616 /* Validate LSO compatibility */ 1617 1618 /* there is only one buffer */ 1619 if (!skb_is_nonlinear(skb)) 1620 return features; 1621 1622 /* length of header */ 1623 hdrlen = skb_transport_offset(skb); 1624 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 1625 hdrlen += tcp_hdrlen(skb); 1626 1627 /* For LSO: 1628 * When software supplies two or more payload buffers all payload buffers 1629 * apart from the last must be a multiple of 8 bytes in size. 1630 */ 1631 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN)) 1632 return features & ~MACB_NETIF_LSO; 1633 1634 nr_frags = skb_shinfo(skb)->nr_frags; 1635 /* No need to check last fragment */ 1636 nr_frags--; 1637 for (f = 0; f < nr_frags; f++) { 1638 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 1639 1640 if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN)) 1641 return features & ~MACB_NETIF_LSO; 1642 } 1643 return features; 1644 } 1645 1646 static inline int macb_clear_csum(struct sk_buff *skb) 1647 { 1648 /* no change for packets without checksum offloading */ 1649 if (skb->ip_summed != CHECKSUM_PARTIAL) 1650 return 0; 1651 1652 /* make sure we can modify the header */ 1653 if (unlikely(skb_cow_head(skb, 0))) 1654 return -1; 1655 1656 /* initialize checksum field 1657 * This is required - at least for Zynq, which otherwise calculates 1658 * wrong UDP header checksums for UDP packets with UDP data len <=2 1659 */ 1660 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; 1661 return 0; 1662 } 1663 1664 static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) 1665 { 1666 bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb); 1667 int padlen = ETH_ZLEN - (*skb)->len; 1668 int headroom = skb_headroom(*skb); 1669 int tailroom = skb_tailroom(*skb); 1670 struct sk_buff *nskb; 1671 u32 fcs; 1672 1673 if (!(ndev->features & NETIF_F_HW_CSUM) || 1674 !((*skb)->ip_summed != CHECKSUM_PARTIAL) || 1675 skb_shinfo(*skb)->gso_size) /* Not available for GSO */ 1676 return 0; 1677 1678 if (padlen <= 0) { 1679 /* FCS could be appeded to tailroom. */ 1680 if (tailroom >= ETH_FCS_LEN) 1681 goto add_fcs; 1682 /* FCS could be appeded by moving data to headroom. */ 1683 else if (!cloned && headroom + tailroom >= ETH_FCS_LEN) 1684 padlen = 0; 1685 /* No room for FCS, need to reallocate skb. */ 1686 else 1687 padlen = ETH_FCS_LEN; 1688 } else { 1689 /* Add room for FCS. */ 1690 padlen += ETH_FCS_LEN; 1691 } 1692 1693 if (!cloned && headroom + tailroom >= padlen) { 1694 (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); 1695 skb_set_tail_pointer(*skb, (*skb)->len); 1696 } else { 1697 nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); 1698 if (!nskb) 1699 return -ENOMEM; 1700 1701 dev_kfree_skb_any(*skb); 1702 *skb = nskb; 1703 } 1704 1705 if (padlen) { 1706 if (padlen >= ETH_FCS_LEN) 1707 skb_put_zero(*skb, padlen - ETH_FCS_LEN); 1708 else 1709 skb_trim(*skb, ETH_FCS_LEN - padlen); 1710 } 1711 1712 add_fcs: 1713 /* set FCS to packet */ 1714 fcs = crc32_le(~0, (*skb)->data, (*skb)->len); 1715 fcs = ~fcs; 1716 1717 skb_put_u8(*skb, fcs & 0xff); 1718 skb_put_u8(*skb, (fcs >> 8) & 0xff); 1719 skb_put_u8(*skb, (fcs >> 16) & 0xff); 1720 skb_put_u8(*skb, (fcs >> 24) & 0xff); 1721 1722 return 0; 1723 } 1724 1725 static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) 1726 { 1727 u16 queue_index = skb_get_queue_mapping(skb); 1728 struct macb *bp = netdev_priv(dev); 1729 struct macb_queue *queue = &bp->queues[queue_index]; 1730 unsigned long flags; 1731 unsigned int desc_cnt, nr_frags, frag_size, f; 1732 unsigned int hdrlen; 1733 bool is_lso, is_udp = 0; 1734 netdev_tx_t ret = NETDEV_TX_OK; 1735 1736 if (macb_clear_csum(skb)) { 1737 dev_kfree_skb_any(skb); 1738 return ret; 1739 } 1740 1741 if (macb_pad_and_fcs(&skb, dev)) { 1742 dev_kfree_skb_any(skb); 1743 return ret; 1744 } 1745 1746 is_lso = (skb_shinfo(skb)->gso_size != 0); 1747 1748 if (is_lso) { 1749 is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP); 1750 1751 /* length of headers */ 1752 if (is_udp) 1753 /* only queue eth + ip headers separately for UDP */ 1754 hdrlen = skb_transport_offset(skb); 1755 else 1756 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); 1757 if (skb_headlen(skb) < hdrlen) { 1758 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); 1759 /* if this is required, would need to copy to single buffer */ 1760 return NETDEV_TX_BUSY; 1761 } 1762 } else 1763 hdrlen = min(skb_headlen(skb), bp->max_tx_length); 1764 1765 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 1766 netdev_vdbg(bp->dev, 1767 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", 1768 queue_index, skb->len, skb->head, skb->data, 1769 skb_tail_pointer(skb), skb_end_pointer(skb)); 1770 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, 1771 skb->data, 16, true); 1772 #endif 1773 1774 /* Count how many TX buffer descriptors are needed to send this 1775 * socket buffer: skb fragments of jumbo frames may need to be 1776 * split into many buffer descriptors. 1777 */ 1778 if (is_lso && (skb_headlen(skb) > hdrlen)) 1779 /* extra header descriptor if also payload in first buffer */ 1780 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; 1781 else 1782 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); 1783 nr_frags = skb_shinfo(skb)->nr_frags; 1784 for (f = 0; f < nr_frags; f++) { 1785 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); 1786 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); 1787 } 1788 1789 spin_lock_irqsave(&bp->lock, flags); 1790 1791 /* This is a hard error, log it. */ 1792 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, 1793 bp->tx_ring_size) < desc_cnt) { 1794 netif_stop_subqueue(dev, queue_index); 1795 spin_unlock_irqrestore(&bp->lock, flags); 1796 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", 1797 queue->tx_head, queue->tx_tail); 1798 return NETDEV_TX_BUSY; 1799 } 1800 1801 /* Map socket buffer for DMA transfer */ 1802 if (!macb_tx_map(bp, queue, skb, hdrlen)) { 1803 dev_kfree_skb_any(skb); 1804 goto unlock; 1805 } 1806 1807 /* Make newly initialized descriptor visible to hardware */ 1808 wmb(); 1809 skb_tx_timestamp(skb); 1810 1811 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 1812 1813 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) 1814 netif_stop_subqueue(dev, queue_index); 1815 1816 unlock: 1817 spin_unlock_irqrestore(&bp->lock, flags); 1818 1819 return ret; 1820 } 1821 1822 static void macb_init_rx_buffer_size(struct macb *bp, size_t size) 1823 { 1824 if (!macb_is_gem(bp)) { 1825 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; 1826 } else { 1827 bp->rx_buffer_size = size; 1828 1829 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { 1830 netdev_dbg(bp->dev, 1831 "RX buffer must be multiple of %d bytes, expanding\n", 1832 RX_BUFFER_MULTIPLE); 1833 bp->rx_buffer_size = 1834 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); 1835 } 1836 } 1837 1838 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", 1839 bp->dev->mtu, bp->rx_buffer_size); 1840 } 1841 1842 static void gem_free_rx_buffers(struct macb *bp) 1843 { 1844 struct sk_buff *skb; 1845 struct macb_dma_desc *desc; 1846 struct macb_queue *queue; 1847 dma_addr_t addr; 1848 unsigned int q; 1849 int i; 1850 1851 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1852 if (!queue->rx_skbuff) 1853 continue; 1854 1855 for (i = 0; i < bp->rx_ring_size; i++) { 1856 skb = queue->rx_skbuff[i]; 1857 1858 if (!skb) 1859 continue; 1860 1861 desc = macb_rx_desc(queue, i); 1862 addr = macb_get_addr(bp, desc); 1863 1864 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, 1865 DMA_FROM_DEVICE); 1866 dev_kfree_skb_any(skb); 1867 skb = NULL; 1868 } 1869 1870 kfree(queue->rx_skbuff); 1871 queue->rx_skbuff = NULL; 1872 } 1873 } 1874 1875 static void macb_free_rx_buffers(struct macb *bp) 1876 { 1877 struct macb_queue *queue = &bp->queues[0]; 1878 1879 if (queue->rx_buffers) { 1880 dma_free_coherent(&bp->pdev->dev, 1881 bp->rx_ring_size * bp->rx_buffer_size, 1882 queue->rx_buffers, queue->rx_buffers_dma); 1883 queue->rx_buffers = NULL; 1884 } 1885 } 1886 1887 static void macb_free_consistent(struct macb *bp) 1888 { 1889 struct macb_queue *queue; 1890 unsigned int q; 1891 int size; 1892 1893 bp->macbgem_ops.mog_free_rx_buffers(bp); 1894 1895 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1896 kfree(queue->tx_skb); 1897 queue->tx_skb = NULL; 1898 if (queue->tx_ring) { 1899 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; 1900 dma_free_coherent(&bp->pdev->dev, size, 1901 queue->tx_ring, queue->tx_ring_dma); 1902 queue->tx_ring = NULL; 1903 } 1904 if (queue->rx_ring) { 1905 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; 1906 dma_free_coherent(&bp->pdev->dev, size, 1907 queue->rx_ring, queue->rx_ring_dma); 1908 queue->rx_ring = NULL; 1909 } 1910 } 1911 } 1912 1913 static int gem_alloc_rx_buffers(struct macb *bp) 1914 { 1915 struct macb_queue *queue; 1916 unsigned int q; 1917 int size; 1918 1919 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1920 size = bp->rx_ring_size * sizeof(struct sk_buff *); 1921 queue->rx_skbuff = kzalloc(size, GFP_KERNEL); 1922 if (!queue->rx_skbuff) 1923 return -ENOMEM; 1924 else 1925 netdev_dbg(bp->dev, 1926 "Allocated %d RX struct sk_buff entries at %p\n", 1927 bp->rx_ring_size, queue->rx_skbuff); 1928 } 1929 return 0; 1930 } 1931 1932 static int macb_alloc_rx_buffers(struct macb *bp) 1933 { 1934 struct macb_queue *queue = &bp->queues[0]; 1935 int size; 1936 1937 size = bp->rx_ring_size * bp->rx_buffer_size; 1938 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, 1939 &queue->rx_buffers_dma, GFP_KERNEL); 1940 if (!queue->rx_buffers) 1941 return -ENOMEM; 1942 1943 netdev_dbg(bp->dev, 1944 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", 1945 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers); 1946 return 0; 1947 } 1948 1949 static int macb_alloc_consistent(struct macb *bp) 1950 { 1951 struct macb_queue *queue; 1952 unsigned int q; 1953 int size; 1954 1955 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1956 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; 1957 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 1958 &queue->tx_ring_dma, 1959 GFP_KERNEL); 1960 if (!queue->tx_ring) 1961 goto out_err; 1962 netdev_dbg(bp->dev, 1963 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n", 1964 q, size, (unsigned long)queue->tx_ring_dma, 1965 queue->tx_ring); 1966 1967 size = bp->tx_ring_size * sizeof(struct macb_tx_skb); 1968 queue->tx_skb = kmalloc(size, GFP_KERNEL); 1969 if (!queue->tx_skb) 1970 goto out_err; 1971 1972 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; 1973 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 1974 &queue->rx_ring_dma, GFP_KERNEL); 1975 if (!queue->rx_ring) 1976 goto out_err; 1977 netdev_dbg(bp->dev, 1978 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", 1979 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring); 1980 } 1981 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) 1982 goto out_err; 1983 1984 return 0; 1985 1986 out_err: 1987 macb_free_consistent(bp); 1988 return -ENOMEM; 1989 } 1990 1991 static void gem_init_rings(struct macb *bp) 1992 { 1993 struct macb_queue *queue; 1994 struct macb_dma_desc *desc = NULL; 1995 unsigned int q; 1996 int i; 1997 1998 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1999 for (i = 0; i < bp->tx_ring_size; i++) { 2000 desc = macb_tx_desc(queue, i); 2001 macb_set_addr(bp, desc, 0); 2002 desc->ctrl = MACB_BIT(TX_USED); 2003 } 2004 desc->ctrl |= MACB_BIT(TX_WRAP); 2005 queue->tx_head = 0; 2006 queue->tx_tail = 0; 2007 2008 queue->rx_tail = 0; 2009 queue->rx_prepared_head = 0; 2010 2011 gem_rx_refill(queue); 2012 } 2013 2014 } 2015 2016 static void macb_init_rings(struct macb *bp) 2017 { 2018 int i; 2019 struct macb_dma_desc *desc = NULL; 2020 2021 macb_init_rx_ring(&bp->queues[0]); 2022 2023 for (i = 0; i < bp->tx_ring_size; i++) { 2024 desc = macb_tx_desc(&bp->queues[0], i); 2025 macb_set_addr(bp, desc, 0); 2026 desc->ctrl = MACB_BIT(TX_USED); 2027 } 2028 bp->queues[0].tx_head = 0; 2029 bp->queues[0].tx_tail = 0; 2030 desc->ctrl |= MACB_BIT(TX_WRAP); 2031 } 2032 2033 static void macb_reset_hw(struct macb *bp) 2034 { 2035 struct macb_queue *queue; 2036 unsigned int q; 2037 u32 ctrl = macb_readl(bp, NCR); 2038 2039 /* Disable RX and TX (XXX: Should we halt the transmission 2040 * more gracefully?) 2041 */ 2042 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); 2043 2044 /* Clear the stats registers (XXX: Update stats first?) */ 2045 ctrl |= MACB_BIT(CLRSTAT); 2046 2047 macb_writel(bp, NCR, ctrl); 2048 2049 /* Clear all status flags */ 2050 macb_writel(bp, TSR, -1); 2051 macb_writel(bp, RSR, -1); 2052 2053 /* Disable all interrupts */ 2054 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2055 queue_writel(queue, IDR, -1); 2056 queue_readl(queue, ISR); 2057 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 2058 queue_writel(queue, ISR, -1); 2059 } 2060 } 2061 2062 static u32 gem_mdc_clk_div(struct macb *bp) 2063 { 2064 u32 config; 2065 unsigned long pclk_hz = clk_get_rate(bp->pclk); 2066 2067 if (pclk_hz <= 20000000) 2068 config = GEM_BF(CLK, GEM_CLK_DIV8); 2069 else if (pclk_hz <= 40000000) 2070 config = GEM_BF(CLK, GEM_CLK_DIV16); 2071 else if (pclk_hz <= 80000000) 2072 config = GEM_BF(CLK, GEM_CLK_DIV32); 2073 else if (pclk_hz <= 120000000) 2074 config = GEM_BF(CLK, GEM_CLK_DIV48); 2075 else if (pclk_hz <= 160000000) 2076 config = GEM_BF(CLK, GEM_CLK_DIV64); 2077 else 2078 config = GEM_BF(CLK, GEM_CLK_DIV96); 2079 2080 return config; 2081 } 2082 2083 static u32 macb_mdc_clk_div(struct macb *bp) 2084 { 2085 u32 config; 2086 unsigned long pclk_hz; 2087 2088 if (macb_is_gem(bp)) 2089 return gem_mdc_clk_div(bp); 2090 2091 pclk_hz = clk_get_rate(bp->pclk); 2092 if (pclk_hz <= 20000000) 2093 config = MACB_BF(CLK, MACB_CLK_DIV8); 2094 else if (pclk_hz <= 40000000) 2095 config = MACB_BF(CLK, MACB_CLK_DIV16); 2096 else if (pclk_hz <= 80000000) 2097 config = MACB_BF(CLK, MACB_CLK_DIV32); 2098 else 2099 config = MACB_BF(CLK, MACB_CLK_DIV64); 2100 2101 return config; 2102 } 2103 2104 /* Get the DMA bus width field of the network configuration register that we 2105 * should program. We find the width from decoding the design configuration 2106 * register to find the maximum supported data bus width. 2107 */ 2108 static u32 macb_dbw(struct macb *bp) 2109 { 2110 if (!macb_is_gem(bp)) 2111 return 0; 2112 2113 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { 2114 case 4: 2115 return GEM_BF(DBW, GEM_DBW128); 2116 case 2: 2117 return GEM_BF(DBW, GEM_DBW64); 2118 case 1: 2119 default: 2120 return GEM_BF(DBW, GEM_DBW32); 2121 } 2122 } 2123 2124 /* Configure the receive DMA engine 2125 * - use the correct receive buffer size 2126 * - set best burst length for DMA operations 2127 * (if not supported by FIFO, it will fallback to default) 2128 * - set both rx/tx packet buffers to full memory size 2129 * These are configurable parameters for GEM. 2130 */ 2131 static void macb_configure_dma(struct macb *bp) 2132 { 2133 struct macb_queue *queue; 2134 u32 buffer_size; 2135 unsigned int q; 2136 u32 dmacfg; 2137 2138 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; 2139 if (macb_is_gem(bp)) { 2140 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); 2141 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2142 if (q) 2143 queue_writel(queue, RBQS, buffer_size); 2144 else 2145 dmacfg |= GEM_BF(RXBS, buffer_size); 2146 } 2147 if (bp->dma_burst_length) 2148 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); 2149 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 2150 dmacfg &= ~GEM_BIT(ENDIA_PKT); 2151 2152 if (bp->native_io) 2153 dmacfg &= ~GEM_BIT(ENDIA_DESC); 2154 else 2155 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ 2156 2157 if (bp->dev->features & NETIF_F_HW_CSUM) 2158 dmacfg |= GEM_BIT(TXCOEN); 2159 else 2160 dmacfg &= ~GEM_BIT(TXCOEN); 2161 2162 dmacfg &= ~GEM_BIT(ADDR64); 2163 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2164 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2165 dmacfg |= GEM_BIT(ADDR64); 2166 #endif 2167 #ifdef CONFIG_MACB_USE_HWSTAMP 2168 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) 2169 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT); 2170 #endif 2171 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", 2172 dmacfg); 2173 gem_writel(bp, DMACFG, dmacfg); 2174 } 2175 } 2176 2177 static void macb_init_hw(struct macb *bp) 2178 { 2179 struct macb_queue *queue; 2180 unsigned int q; 2181 2182 u32 config; 2183 2184 macb_reset_hw(bp); 2185 macb_set_hwaddr(bp); 2186 2187 config = macb_mdc_clk_div(bp); 2188 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) 2189 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 2190 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ 2191 config |= MACB_BIT(PAE); /* PAuse Enable */ 2192 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ 2193 if (bp->caps & MACB_CAPS_JUMBO) 2194 config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ 2195 else 2196 config |= MACB_BIT(BIG); /* Receive oversized frames */ 2197 if (bp->dev->flags & IFF_PROMISC) 2198 config |= MACB_BIT(CAF); /* Copy All Frames */ 2199 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) 2200 config |= GEM_BIT(RXCOEN); 2201 if (!(bp->dev->flags & IFF_BROADCAST)) 2202 config |= MACB_BIT(NBC); /* No BroadCast */ 2203 config |= macb_dbw(bp); 2204 macb_writel(bp, NCFGR, config); 2205 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) 2206 gem_writel(bp, JML, bp->jumbo_max_len); 2207 bp->speed = SPEED_10; 2208 bp->duplex = DUPLEX_HALF; 2209 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; 2210 if (bp->caps & MACB_CAPS_JUMBO) 2211 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; 2212 2213 macb_configure_dma(bp); 2214 2215 /* Initialize TX and RX buffers */ 2216 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2217 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); 2218 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2219 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2220 queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma)); 2221 #endif 2222 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); 2223 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2224 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2225 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); 2226 #endif 2227 2228 /* Enable interrupts */ 2229 queue_writel(queue, IER, 2230 MACB_RX_INT_FLAGS | 2231 MACB_TX_INT_FLAGS | 2232 MACB_BIT(HRESP)); 2233 } 2234 2235 /* Enable TX and RX */ 2236 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); 2237 } 2238 2239 /* The hash address register is 64 bits long and takes up two 2240 * locations in the memory map. The least significant bits are stored 2241 * in EMAC_HSL and the most significant bits in EMAC_HSH. 2242 * 2243 * The unicast hash enable and the multicast hash enable bits in the 2244 * network configuration register enable the reception of hash matched 2245 * frames. The destination address is reduced to a 6 bit index into 2246 * the 64 bit hash register using the following hash function. The 2247 * hash function is an exclusive or of every sixth bit of the 2248 * destination address. 2249 * 2250 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] 2251 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] 2252 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] 2253 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] 2254 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] 2255 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] 2256 * 2257 * da[0] represents the least significant bit of the first byte 2258 * received, that is, the multicast/unicast indicator, and da[47] 2259 * represents the most significant bit of the last byte received. If 2260 * the hash index, hi[n], points to a bit that is set in the hash 2261 * register then the frame will be matched according to whether the 2262 * frame is multicast or unicast. A multicast match will be signalled 2263 * if the multicast hash enable bit is set, da[0] is 1 and the hash 2264 * index points to a bit set in the hash register. A unicast match 2265 * will be signalled if the unicast hash enable bit is set, da[0] is 0 2266 * and the hash index points to a bit set in the hash register. To 2267 * receive all multicast frames, the hash register should be set with 2268 * all ones and the multicast hash enable bit should be set in the 2269 * network configuration register. 2270 */ 2271 2272 static inline int hash_bit_value(int bitnr, __u8 *addr) 2273 { 2274 if (addr[bitnr / 8] & (1 << (bitnr % 8))) 2275 return 1; 2276 return 0; 2277 } 2278 2279 /* Return the hash index value for the specified address. */ 2280 static int hash_get_index(__u8 *addr) 2281 { 2282 int i, j, bitval; 2283 int hash_index = 0; 2284 2285 for (j = 0; j < 6; j++) { 2286 for (i = 0, bitval = 0; i < 8; i++) 2287 bitval ^= hash_bit_value(i * 6 + j, addr); 2288 2289 hash_index |= (bitval << j); 2290 } 2291 2292 return hash_index; 2293 } 2294 2295 /* Add multicast addresses to the internal multicast-hash table. */ 2296 static void macb_sethashtable(struct net_device *dev) 2297 { 2298 struct netdev_hw_addr *ha; 2299 unsigned long mc_filter[2]; 2300 unsigned int bitnr; 2301 struct macb *bp = netdev_priv(dev); 2302 2303 mc_filter[0] = 0; 2304 mc_filter[1] = 0; 2305 2306 netdev_for_each_mc_addr(ha, dev) { 2307 bitnr = hash_get_index(ha->addr); 2308 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 2309 } 2310 2311 macb_or_gem_writel(bp, HRB, mc_filter[0]); 2312 macb_or_gem_writel(bp, HRT, mc_filter[1]); 2313 } 2314 2315 /* Enable/Disable promiscuous and multicast modes. */ 2316 static void macb_set_rx_mode(struct net_device *dev) 2317 { 2318 unsigned long cfg; 2319 struct macb *bp = netdev_priv(dev); 2320 2321 cfg = macb_readl(bp, NCFGR); 2322 2323 if (dev->flags & IFF_PROMISC) { 2324 /* Enable promiscuous mode */ 2325 cfg |= MACB_BIT(CAF); 2326 2327 /* Disable RX checksum offload */ 2328 if (macb_is_gem(bp)) 2329 cfg &= ~GEM_BIT(RXCOEN); 2330 } else { 2331 /* Disable promiscuous mode */ 2332 cfg &= ~MACB_BIT(CAF); 2333 2334 /* Enable RX checksum offload only if requested */ 2335 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) 2336 cfg |= GEM_BIT(RXCOEN); 2337 } 2338 2339 if (dev->flags & IFF_ALLMULTI) { 2340 /* Enable all multicast mode */ 2341 macb_or_gem_writel(bp, HRB, -1); 2342 macb_or_gem_writel(bp, HRT, -1); 2343 cfg |= MACB_BIT(NCFGR_MTI); 2344 } else if (!netdev_mc_empty(dev)) { 2345 /* Enable specific multicasts */ 2346 macb_sethashtable(dev); 2347 cfg |= MACB_BIT(NCFGR_MTI); 2348 } else if (dev->flags & (~IFF_ALLMULTI)) { 2349 /* Disable all multicast mode */ 2350 macb_or_gem_writel(bp, HRB, 0); 2351 macb_or_gem_writel(bp, HRT, 0); 2352 cfg &= ~MACB_BIT(NCFGR_MTI); 2353 } 2354 2355 macb_writel(bp, NCFGR, cfg); 2356 } 2357 2358 static int macb_open(struct net_device *dev) 2359 { 2360 struct macb *bp = netdev_priv(dev); 2361 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; 2362 struct macb_queue *queue; 2363 unsigned int q; 2364 int err; 2365 2366 netdev_dbg(bp->dev, "open\n"); 2367 2368 /* carrier starts down */ 2369 netif_carrier_off(dev); 2370 2371 /* if the phy is not yet register, retry later*/ 2372 if (!dev->phydev) 2373 return -EAGAIN; 2374 2375 /* RX buffers initialization */ 2376 macb_init_rx_buffer_size(bp, bufsz); 2377 2378 err = macb_alloc_consistent(bp); 2379 if (err) { 2380 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", 2381 err); 2382 return err; 2383 } 2384 2385 bp->macbgem_ops.mog_init_rings(bp); 2386 macb_init_hw(bp); 2387 2388 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2389 napi_enable(&queue->napi); 2390 2391 /* schedule a link state check */ 2392 phy_start(dev->phydev); 2393 2394 netif_tx_start_all_queues(dev); 2395 2396 if (bp->ptp_info) 2397 bp->ptp_info->ptp_init(dev); 2398 2399 return 0; 2400 } 2401 2402 static int macb_close(struct net_device *dev) 2403 { 2404 struct macb *bp = netdev_priv(dev); 2405 struct macb_queue *queue; 2406 unsigned long flags; 2407 unsigned int q; 2408 2409 netif_tx_stop_all_queues(dev); 2410 2411 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2412 napi_disable(&queue->napi); 2413 2414 if (dev->phydev) 2415 phy_stop(dev->phydev); 2416 2417 spin_lock_irqsave(&bp->lock, flags); 2418 macb_reset_hw(bp); 2419 netif_carrier_off(dev); 2420 spin_unlock_irqrestore(&bp->lock, flags); 2421 2422 macb_free_consistent(bp); 2423 2424 if (bp->ptp_info) 2425 bp->ptp_info->ptp_remove(dev); 2426 2427 return 0; 2428 } 2429 2430 static int macb_change_mtu(struct net_device *dev, int new_mtu) 2431 { 2432 if (netif_running(dev)) 2433 return -EBUSY; 2434 2435 dev->mtu = new_mtu; 2436 2437 return 0; 2438 } 2439 2440 static void gem_update_stats(struct macb *bp) 2441 { 2442 struct macb_queue *queue; 2443 unsigned int i, q, idx; 2444 unsigned long *stat; 2445 2446 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 2447 2448 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { 2449 u32 offset = gem_statistics[i].offset; 2450 u64 val = bp->macb_reg_readl(bp, offset); 2451 2452 bp->ethtool_stats[i] += val; 2453 *p += val; 2454 2455 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { 2456 /* Add GEM_OCTTXH, GEM_OCTRXH */ 2457 val = bp->macb_reg_readl(bp, offset + 4); 2458 bp->ethtool_stats[i] += ((u64)val) << 32; 2459 *(++p) += val; 2460 } 2461 } 2462 2463 idx = GEM_STATS_LEN; 2464 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2465 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat) 2466 bp->ethtool_stats[idx++] = *stat; 2467 } 2468 2469 static struct net_device_stats *gem_get_stats(struct macb *bp) 2470 { 2471 struct gem_stats *hwstat = &bp->hw_stats.gem; 2472 struct net_device_stats *nstat = &bp->dev->stats; 2473 2474 gem_update_stats(bp); 2475 2476 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + 2477 hwstat->rx_alignment_errors + 2478 hwstat->rx_resource_errors + 2479 hwstat->rx_overruns + 2480 hwstat->rx_oversize_frames + 2481 hwstat->rx_jabbers + 2482 hwstat->rx_undersized_frames + 2483 hwstat->rx_length_field_frame_errors); 2484 nstat->tx_errors = (hwstat->tx_late_collisions + 2485 hwstat->tx_excessive_collisions + 2486 hwstat->tx_underrun + 2487 hwstat->tx_carrier_sense_errors); 2488 nstat->multicast = hwstat->rx_multicast_frames; 2489 nstat->collisions = (hwstat->tx_single_collision_frames + 2490 hwstat->tx_multiple_collision_frames + 2491 hwstat->tx_excessive_collisions); 2492 nstat->rx_length_errors = (hwstat->rx_oversize_frames + 2493 hwstat->rx_jabbers + 2494 hwstat->rx_undersized_frames + 2495 hwstat->rx_length_field_frame_errors); 2496 nstat->rx_over_errors = hwstat->rx_resource_errors; 2497 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; 2498 nstat->rx_frame_errors = hwstat->rx_alignment_errors; 2499 nstat->rx_fifo_errors = hwstat->rx_overruns; 2500 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; 2501 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; 2502 nstat->tx_fifo_errors = hwstat->tx_underrun; 2503 2504 return nstat; 2505 } 2506 2507 static void gem_get_ethtool_stats(struct net_device *dev, 2508 struct ethtool_stats *stats, u64 *data) 2509 { 2510 struct macb *bp; 2511 2512 bp = netdev_priv(dev); 2513 gem_update_stats(bp); 2514 memcpy(data, &bp->ethtool_stats, sizeof(u64) 2515 * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES)); 2516 } 2517 2518 static int gem_get_sset_count(struct net_device *dev, int sset) 2519 { 2520 struct macb *bp = netdev_priv(dev); 2521 2522 switch (sset) { 2523 case ETH_SS_STATS: 2524 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; 2525 default: 2526 return -EOPNOTSUPP; 2527 } 2528 } 2529 2530 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) 2531 { 2532 char stat_string[ETH_GSTRING_LEN]; 2533 struct macb *bp = netdev_priv(dev); 2534 struct macb_queue *queue; 2535 unsigned int i; 2536 unsigned int q; 2537 2538 switch (sset) { 2539 case ETH_SS_STATS: 2540 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN) 2541 memcpy(p, gem_statistics[i].stat_string, 2542 ETH_GSTRING_LEN); 2543 2544 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2545 for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) { 2546 snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s", 2547 q, queue_statistics[i].stat_string); 2548 memcpy(p, stat_string, ETH_GSTRING_LEN); 2549 } 2550 } 2551 break; 2552 } 2553 } 2554 2555 static struct net_device_stats *macb_get_stats(struct net_device *dev) 2556 { 2557 struct macb *bp = netdev_priv(dev); 2558 struct net_device_stats *nstat = &bp->dev->stats; 2559 struct macb_stats *hwstat = &bp->hw_stats.macb; 2560 2561 if (macb_is_gem(bp)) 2562 return gem_get_stats(bp); 2563 2564 /* read stats from hardware */ 2565 macb_update_stats(bp); 2566 2567 /* Convert HW stats into netdevice stats */ 2568 nstat->rx_errors = (hwstat->rx_fcs_errors + 2569 hwstat->rx_align_errors + 2570 hwstat->rx_resource_errors + 2571 hwstat->rx_overruns + 2572 hwstat->rx_oversize_pkts + 2573 hwstat->rx_jabbers + 2574 hwstat->rx_undersize_pkts + 2575 hwstat->rx_length_mismatch); 2576 nstat->tx_errors = (hwstat->tx_late_cols + 2577 hwstat->tx_excessive_cols + 2578 hwstat->tx_underruns + 2579 hwstat->tx_carrier_errors + 2580 hwstat->sqe_test_errors); 2581 nstat->collisions = (hwstat->tx_single_cols + 2582 hwstat->tx_multiple_cols + 2583 hwstat->tx_excessive_cols); 2584 nstat->rx_length_errors = (hwstat->rx_oversize_pkts + 2585 hwstat->rx_jabbers + 2586 hwstat->rx_undersize_pkts + 2587 hwstat->rx_length_mismatch); 2588 nstat->rx_over_errors = hwstat->rx_resource_errors + 2589 hwstat->rx_overruns; 2590 nstat->rx_crc_errors = hwstat->rx_fcs_errors; 2591 nstat->rx_frame_errors = hwstat->rx_align_errors; 2592 nstat->rx_fifo_errors = hwstat->rx_overruns; 2593 /* XXX: What does "missed" mean? */ 2594 nstat->tx_aborted_errors = hwstat->tx_excessive_cols; 2595 nstat->tx_carrier_errors = hwstat->tx_carrier_errors; 2596 nstat->tx_fifo_errors = hwstat->tx_underruns; 2597 /* Don't know about heartbeat or window errors... */ 2598 2599 return nstat; 2600 } 2601 2602 static int macb_get_regs_len(struct net_device *netdev) 2603 { 2604 return MACB_GREGS_NBR * sizeof(u32); 2605 } 2606 2607 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, 2608 void *p) 2609 { 2610 struct macb *bp = netdev_priv(dev); 2611 unsigned int tail, head; 2612 u32 *regs_buff = p; 2613 2614 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) 2615 | MACB_GREGS_VERSION; 2616 2617 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); 2618 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); 2619 2620 regs_buff[0] = macb_readl(bp, NCR); 2621 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); 2622 regs_buff[2] = macb_readl(bp, NSR); 2623 regs_buff[3] = macb_readl(bp, TSR); 2624 regs_buff[4] = macb_readl(bp, RBQP); 2625 regs_buff[5] = macb_readl(bp, TBQP); 2626 regs_buff[6] = macb_readl(bp, RSR); 2627 regs_buff[7] = macb_readl(bp, IMR); 2628 2629 regs_buff[8] = tail; 2630 regs_buff[9] = head; 2631 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); 2632 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); 2633 2634 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) 2635 regs_buff[12] = macb_or_gem_readl(bp, USRIO); 2636 if (macb_is_gem(bp)) 2637 regs_buff[13] = gem_readl(bp, DMACFG); 2638 } 2639 2640 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2641 { 2642 struct macb *bp = netdev_priv(netdev); 2643 2644 wol->supported = 0; 2645 wol->wolopts = 0; 2646 2647 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { 2648 wol->supported = WAKE_MAGIC; 2649 2650 if (bp->wol & MACB_WOL_ENABLED) 2651 wol->wolopts |= WAKE_MAGIC; 2652 } 2653 } 2654 2655 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2656 { 2657 struct macb *bp = netdev_priv(netdev); 2658 2659 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || 2660 (wol->wolopts & ~WAKE_MAGIC)) 2661 return -EOPNOTSUPP; 2662 2663 if (wol->wolopts & WAKE_MAGIC) 2664 bp->wol |= MACB_WOL_ENABLED; 2665 else 2666 bp->wol &= ~MACB_WOL_ENABLED; 2667 2668 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); 2669 2670 return 0; 2671 } 2672 2673 static void macb_get_ringparam(struct net_device *netdev, 2674 struct ethtool_ringparam *ring) 2675 { 2676 struct macb *bp = netdev_priv(netdev); 2677 2678 ring->rx_max_pending = MAX_RX_RING_SIZE; 2679 ring->tx_max_pending = MAX_TX_RING_SIZE; 2680 2681 ring->rx_pending = bp->rx_ring_size; 2682 ring->tx_pending = bp->tx_ring_size; 2683 } 2684 2685 static int macb_set_ringparam(struct net_device *netdev, 2686 struct ethtool_ringparam *ring) 2687 { 2688 struct macb *bp = netdev_priv(netdev); 2689 u32 new_rx_size, new_tx_size; 2690 unsigned int reset = 0; 2691 2692 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 2693 return -EINVAL; 2694 2695 new_rx_size = clamp_t(u32, ring->rx_pending, 2696 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE); 2697 new_rx_size = roundup_pow_of_two(new_rx_size); 2698 2699 new_tx_size = clamp_t(u32, ring->tx_pending, 2700 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE); 2701 new_tx_size = roundup_pow_of_two(new_tx_size); 2702 2703 if ((new_tx_size == bp->tx_ring_size) && 2704 (new_rx_size == bp->rx_ring_size)) { 2705 /* nothing to do */ 2706 return 0; 2707 } 2708 2709 if (netif_running(bp->dev)) { 2710 reset = 1; 2711 macb_close(bp->dev); 2712 } 2713 2714 bp->rx_ring_size = new_rx_size; 2715 bp->tx_ring_size = new_tx_size; 2716 2717 if (reset) 2718 macb_open(bp->dev); 2719 2720 return 0; 2721 } 2722 2723 #ifdef CONFIG_MACB_USE_HWSTAMP 2724 static unsigned int gem_get_tsu_rate(struct macb *bp) 2725 { 2726 struct clk *tsu_clk; 2727 unsigned int tsu_rate; 2728 2729 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); 2730 if (!IS_ERR(tsu_clk)) 2731 tsu_rate = clk_get_rate(tsu_clk); 2732 /* try pclk instead */ 2733 else if (!IS_ERR(bp->pclk)) { 2734 tsu_clk = bp->pclk; 2735 tsu_rate = clk_get_rate(tsu_clk); 2736 } else 2737 return -ENOTSUPP; 2738 return tsu_rate; 2739 } 2740 2741 static s32 gem_get_ptp_max_adj(void) 2742 { 2743 return 64000000; 2744 } 2745 2746 static int gem_get_ts_info(struct net_device *dev, 2747 struct ethtool_ts_info *info) 2748 { 2749 struct macb *bp = netdev_priv(dev); 2750 2751 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { 2752 ethtool_op_get_ts_info(dev, info); 2753 return 0; 2754 } 2755 2756 info->so_timestamping = 2757 SOF_TIMESTAMPING_TX_SOFTWARE | 2758 SOF_TIMESTAMPING_RX_SOFTWARE | 2759 SOF_TIMESTAMPING_SOFTWARE | 2760 SOF_TIMESTAMPING_TX_HARDWARE | 2761 SOF_TIMESTAMPING_RX_HARDWARE | 2762 SOF_TIMESTAMPING_RAW_HARDWARE; 2763 info->tx_types = 2764 (1 << HWTSTAMP_TX_ONESTEP_SYNC) | 2765 (1 << HWTSTAMP_TX_OFF) | 2766 (1 << HWTSTAMP_TX_ON); 2767 info->rx_filters = 2768 (1 << HWTSTAMP_FILTER_NONE) | 2769 (1 << HWTSTAMP_FILTER_ALL); 2770 2771 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1; 2772 2773 return 0; 2774 } 2775 2776 static struct macb_ptp_info gem_ptp_info = { 2777 .ptp_init = gem_ptp_init, 2778 .ptp_remove = gem_ptp_remove, 2779 .get_ptp_max_adj = gem_get_ptp_max_adj, 2780 .get_tsu_rate = gem_get_tsu_rate, 2781 .get_ts_info = gem_get_ts_info, 2782 .get_hwtst = gem_get_hwtst, 2783 .set_hwtst = gem_set_hwtst, 2784 }; 2785 #endif 2786 2787 static int macb_get_ts_info(struct net_device *netdev, 2788 struct ethtool_ts_info *info) 2789 { 2790 struct macb *bp = netdev_priv(netdev); 2791 2792 if (bp->ptp_info) 2793 return bp->ptp_info->get_ts_info(netdev, info); 2794 2795 return ethtool_op_get_ts_info(netdev, info); 2796 } 2797 2798 static void gem_enable_flow_filters(struct macb *bp, bool enable) 2799 { 2800 struct ethtool_rx_fs_item *item; 2801 u32 t2_scr; 2802 int num_t2_scr; 2803 2804 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)); 2805 2806 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 2807 struct ethtool_rx_flow_spec *fs = &item->fs; 2808 struct ethtool_tcpip4_spec *tp4sp_m; 2809 2810 if (fs->location >= num_t2_scr) 2811 continue; 2812 2813 t2_scr = gem_readl_n(bp, SCRT2, fs->location); 2814 2815 /* enable/disable screener regs for the flow entry */ 2816 t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr); 2817 2818 /* only enable fields with no masking */ 2819 tp4sp_m = &(fs->m_u.tcp_ip4_spec); 2820 2821 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF)) 2822 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr); 2823 else 2824 t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr); 2825 2826 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF)) 2827 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr); 2828 else 2829 t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr); 2830 2831 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF))) 2832 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr); 2833 else 2834 t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr); 2835 2836 gem_writel_n(bp, SCRT2, fs->location, t2_scr); 2837 } 2838 } 2839 2840 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) 2841 { 2842 struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m; 2843 uint16_t index = fs->location; 2844 u32 w0, w1, t2_scr; 2845 bool cmp_a = false; 2846 bool cmp_b = false; 2847 bool cmp_c = false; 2848 2849 tp4sp_v = &(fs->h_u.tcp_ip4_spec); 2850 tp4sp_m = &(fs->m_u.tcp_ip4_spec); 2851 2852 /* ignore field if any masking set */ 2853 if (tp4sp_m->ip4src == 0xFFFFFFFF) { 2854 /* 1st compare reg - IP source address */ 2855 w0 = 0; 2856 w1 = 0; 2857 w0 = tp4sp_v->ip4src; 2858 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 2859 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); 2860 w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1); 2861 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0); 2862 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1); 2863 cmp_a = true; 2864 } 2865 2866 /* ignore field if any masking set */ 2867 if (tp4sp_m->ip4dst == 0xFFFFFFFF) { 2868 /* 2nd compare reg - IP destination address */ 2869 w0 = 0; 2870 w1 = 0; 2871 w0 = tp4sp_v->ip4dst; 2872 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 2873 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); 2874 w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1); 2875 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0); 2876 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1); 2877 cmp_b = true; 2878 } 2879 2880 /* ignore both port fields if masking set in both */ 2881 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) { 2882 /* 3rd compare reg - source port, destination port */ 2883 w0 = 0; 2884 w1 = 0; 2885 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1); 2886 if (tp4sp_m->psrc == tp4sp_m->pdst) { 2887 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0); 2888 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); 2889 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 2890 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); 2891 } else { 2892 /* only one port definition */ 2893 w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */ 2894 w0 = GEM_BFINS(T2MASK, 0xFFFF, w0); 2895 if (tp4sp_m->psrc == 0xFFFF) { /* src port */ 2896 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0); 2897 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); 2898 } else { /* dst port */ 2899 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); 2900 w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1); 2901 } 2902 } 2903 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0); 2904 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1); 2905 cmp_c = true; 2906 } 2907 2908 t2_scr = 0; 2909 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr); 2910 t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr); 2911 if (cmp_a) 2912 t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr); 2913 if (cmp_b) 2914 t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr); 2915 if (cmp_c) 2916 t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr); 2917 gem_writel_n(bp, SCRT2, index, t2_scr); 2918 } 2919 2920 static int gem_add_flow_filter(struct net_device *netdev, 2921 struct ethtool_rxnfc *cmd) 2922 { 2923 struct macb *bp = netdev_priv(netdev); 2924 struct ethtool_rx_flow_spec *fs = &cmd->fs; 2925 struct ethtool_rx_fs_item *item, *newfs; 2926 unsigned long flags; 2927 int ret = -EINVAL; 2928 bool added = false; 2929 2930 newfs = kmalloc(sizeof(*newfs), GFP_KERNEL); 2931 if (newfs == NULL) 2932 return -ENOMEM; 2933 memcpy(&newfs->fs, fs, sizeof(newfs->fs)); 2934 2935 netdev_dbg(netdev, 2936 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", 2937 fs->flow_type, (int)fs->ring_cookie, fs->location, 2938 htonl(fs->h_u.tcp_ip4_spec.ip4src), 2939 htonl(fs->h_u.tcp_ip4_spec.ip4dst), 2940 htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst)); 2941 2942 spin_lock_irqsave(&bp->rx_fs_lock, flags); 2943 2944 /* find correct place to add in list */ 2945 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 2946 if (item->fs.location > newfs->fs.location) { 2947 list_add_tail(&newfs->list, &item->list); 2948 added = true; 2949 break; 2950 } else if (item->fs.location == fs->location) { 2951 netdev_err(netdev, "Rule not added: location %d not free!\n", 2952 fs->location); 2953 ret = -EBUSY; 2954 goto err; 2955 } 2956 } 2957 if (!added) 2958 list_add_tail(&newfs->list, &bp->rx_fs_list.list); 2959 2960 gem_prog_cmp_regs(bp, fs); 2961 bp->rx_fs_list.count++; 2962 /* enable filtering if NTUPLE on */ 2963 if (netdev->features & NETIF_F_NTUPLE) 2964 gem_enable_flow_filters(bp, 1); 2965 2966 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 2967 return 0; 2968 2969 err: 2970 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 2971 kfree(newfs); 2972 return ret; 2973 } 2974 2975 static int gem_del_flow_filter(struct net_device *netdev, 2976 struct ethtool_rxnfc *cmd) 2977 { 2978 struct macb *bp = netdev_priv(netdev); 2979 struct ethtool_rx_fs_item *item; 2980 struct ethtool_rx_flow_spec *fs; 2981 unsigned long flags; 2982 2983 spin_lock_irqsave(&bp->rx_fs_lock, flags); 2984 2985 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 2986 if (item->fs.location == cmd->fs.location) { 2987 /* disable screener regs for the flow entry */ 2988 fs = &(item->fs); 2989 netdev_dbg(netdev, 2990 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", 2991 fs->flow_type, (int)fs->ring_cookie, fs->location, 2992 htonl(fs->h_u.tcp_ip4_spec.ip4src), 2993 htonl(fs->h_u.tcp_ip4_spec.ip4dst), 2994 htons(fs->h_u.tcp_ip4_spec.psrc), 2995 htons(fs->h_u.tcp_ip4_spec.pdst)); 2996 2997 gem_writel_n(bp, SCRT2, fs->location, 0); 2998 2999 list_del(&item->list); 3000 bp->rx_fs_list.count--; 3001 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3002 kfree(item); 3003 return 0; 3004 } 3005 } 3006 3007 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3008 return -EINVAL; 3009 } 3010 3011 static int gem_get_flow_entry(struct net_device *netdev, 3012 struct ethtool_rxnfc *cmd) 3013 { 3014 struct macb *bp = netdev_priv(netdev); 3015 struct ethtool_rx_fs_item *item; 3016 3017 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3018 if (item->fs.location == cmd->fs.location) { 3019 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); 3020 return 0; 3021 } 3022 } 3023 return -EINVAL; 3024 } 3025 3026 static int gem_get_all_flow_entries(struct net_device *netdev, 3027 struct ethtool_rxnfc *cmd, u32 *rule_locs) 3028 { 3029 struct macb *bp = netdev_priv(netdev); 3030 struct ethtool_rx_fs_item *item; 3031 uint32_t cnt = 0; 3032 3033 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3034 if (cnt == cmd->rule_cnt) 3035 return -EMSGSIZE; 3036 rule_locs[cnt] = item->fs.location; 3037 cnt++; 3038 } 3039 cmd->data = bp->max_tuples; 3040 cmd->rule_cnt = cnt; 3041 3042 return 0; 3043 } 3044 3045 static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, 3046 u32 *rule_locs) 3047 { 3048 struct macb *bp = netdev_priv(netdev); 3049 int ret = 0; 3050 3051 switch (cmd->cmd) { 3052 case ETHTOOL_GRXRINGS: 3053 cmd->data = bp->num_queues; 3054 break; 3055 case ETHTOOL_GRXCLSRLCNT: 3056 cmd->rule_cnt = bp->rx_fs_list.count; 3057 break; 3058 case ETHTOOL_GRXCLSRULE: 3059 ret = gem_get_flow_entry(netdev, cmd); 3060 break; 3061 case ETHTOOL_GRXCLSRLALL: 3062 ret = gem_get_all_flow_entries(netdev, cmd, rule_locs); 3063 break; 3064 default: 3065 netdev_err(netdev, 3066 "Command parameter %d is not supported\n", cmd->cmd); 3067 ret = -EOPNOTSUPP; 3068 } 3069 3070 return ret; 3071 } 3072 3073 static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) 3074 { 3075 struct macb *bp = netdev_priv(netdev); 3076 int ret; 3077 3078 switch (cmd->cmd) { 3079 case ETHTOOL_SRXCLSRLINS: 3080 if ((cmd->fs.location >= bp->max_tuples) 3081 || (cmd->fs.ring_cookie >= bp->num_queues)) { 3082 ret = -EINVAL; 3083 break; 3084 } 3085 ret = gem_add_flow_filter(netdev, cmd); 3086 break; 3087 case ETHTOOL_SRXCLSRLDEL: 3088 ret = gem_del_flow_filter(netdev, cmd); 3089 break; 3090 default: 3091 netdev_err(netdev, 3092 "Command parameter %d is not supported\n", cmd->cmd); 3093 ret = -EOPNOTSUPP; 3094 } 3095 3096 return ret; 3097 } 3098 3099 static const struct ethtool_ops macb_ethtool_ops = { 3100 .get_regs_len = macb_get_regs_len, 3101 .get_regs = macb_get_regs, 3102 .get_link = ethtool_op_get_link, 3103 .get_ts_info = ethtool_op_get_ts_info, 3104 .get_wol = macb_get_wol, 3105 .set_wol = macb_set_wol, 3106 .get_link_ksettings = phy_ethtool_get_link_ksettings, 3107 .set_link_ksettings = phy_ethtool_set_link_ksettings, 3108 .get_ringparam = macb_get_ringparam, 3109 .set_ringparam = macb_set_ringparam, 3110 }; 3111 3112 static const struct ethtool_ops gem_ethtool_ops = { 3113 .get_regs_len = macb_get_regs_len, 3114 .get_regs = macb_get_regs, 3115 .get_link = ethtool_op_get_link, 3116 .get_ts_info = macb_get_ts_info, 3117 .get_ethtool_stats = gem_get_ethtool_stats, 3118 .get_strings = gem_get_ethtool_strings, 3119 .get_sset_count = gem_get_sset_count, 3120 .get_link_ksettings = phy_ethtool_get_link_ksettings, 3121 .set_link_ksettings = phy_ethtool_set_link_ksettings, 3122 .get_ringparam = macb_get_ringparam, 3123 .set_ringparam = macb_set_ringparam, 3124 .get_rxnfc = gem_get_rxnfc, 3125 .set_rxnfc = gem_set_rxnfc, 3126 }; 3127 3128 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 3129 { 3130 struct phy_device *phydev = dev->phydev; 3131 struct macb *bp = netdev_priv(dev); 3132 3133 if (!netif_running(dev)) 3134 return -EINVAL; 3135 3136 if (!phydev) 3137 return -ENODEV; 3138 3139 if (!bp->ptp_info) 3140 return phy_mii_ioctl(phydev, rq, cmd); 3141 3142 switch (cmd) { 3143 case SIOCSHWTSTAMP: 3144 return bp->ptp_info->set_hwtst(dev, rq, cmd); 3145 case SIOCGHWTSTAMP: 3146 return bp->ptp_info->get_hwtst(dev, rq); 3147 default: 3148 return phy_mii_ioctl(phydev, rq, cmd); 3149 } 3150 } 3151 3152 static int macb_set_features(struct net_device *netdev, 3153 netdev_features_t features) 3154 { 3155 struct macb *bp = netdev_priv(netdev); 3156 netdev_features_t changed = features ^ netdev->features; 3157 3158 /* TX checksum offload */ 3159 if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) { 3160 u32 dmacfg; 3161 3162 dmacfg = gem_readl(bp, DMACFG); 3163 if (features & NETIF_F_HW_CSUM) 3164 dmacfg |= GEM_BIT(TXCOEN); 3165 else 3166 dmacfg &= ~GEM_BIT(TXCOEN); 3167 gem_writel(bp, DMACFG, dmacfg); 3168 } 3169 3170 /* RX checksum offload */ 3171 if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) { 3172 u32 netcfg; 3173 3174 netcfg = gem_readl(bp, NCFGR); 3175 if (features & NETIF_F_RXCSUM && 3176 !(netdev->flags & IFF_PROMISC)) 3177 netcfg |= GEM_BIT(RXCOEN); 3178 else 3179 netcfg &= ~GEM_BIT(RXCOEN); 3180 gem_writel(bp, NCFGR, netcfg); 3181 } 3182 3183 /* RX Flow Filters */ 3184 if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) { 3185 bool turn_on = features & NETIF_F_NTUPLE; 3186 3187 gem_enable_flow_filters(bp, turn_on); 3188 } 3189 return 0; 3190 } 3191 3192 static const struct net_device_ops macb_netdev_ops = { 3193 .ndo_open = macb_open, 3194 .ndo_stop = macb_close, 3195 .ndo_start_xmit = macb_start_xmit, 3196 .ndo_set_rx_mode = macb_set_rx_mode, 3197 .ndo_get_stats = macb_get_stats, 3198 .ndo_do_ioctl = macb_ioctl, 3199 .ndo_validate_addr = eth_validate_addr, 3200 .ndo_change_mtu = macb_change_mtu, 3201 .ndo_set_mac_address = eth_mac_addr, 3202 #ifdef CONFIG_NET_POLL_CONTROLLER 3203 .ndo_poll_controller = macb_poll_controller, 3204 #endif 3205 .ndo_set_features = macb_set_features, 3206 .ndo_features_check = macb_features_check, 3207 }; 3208 3209 /* Configure peripheral capabilities according to device tree 3210 * and integration options used 3211 */ 3212 static void macb_configure_caps(struct macb *bp, 3213 const struct macb_config *dt_conf) 3214 { 3215 u32 dcfg; 3216 3217 if (dt_conf) 3218 bp->caps = dt_conf->caps; 3219 3220 if (hw_is_gem(bp->regs, bp->native_io)) { 3221 bp->caps |= MACB_CAPS_MACB_IS_GEM; 3222 3223 dcfg = gem_readl(bp, DCFG1); 3224 if (GEM_BFEXT(IRQCOR, dcfg) == 0) 3225 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; 3226 dcfg = gem_readl(bp, DCFG2); 3227 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0) 3228 bp->caps |= MACB_CAPS_FIFO_MODE; 3229 #ifdef CONFIG_MACB_USE_HWSTAMP 3230 if (gem_has_ptp(bp)) { 3231 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) 3232 pr_err("GEM doesn't support hardware ptp.\n"); 3233 else { 3234 bp->hw_dma_cap |= HW_DMA_CAP_PTP; 3235 bp->ptp_info = &gem_ptp_info; 3236 } 3237 } 3238 #endif 3239 } 3240 3241 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); 3242 } 3243 3244 static void macb_probe_queues(void __iomem *mem, 3245 bool native_io, 3246 unsigned int *queue_mask, 3247 unsigned int *num_queues) 3248 { 3249 unsigned int hw_q; 3250 3251 *queue_mask = 0x1; 3252 *num_queues = 1; 3253 3254 /* is it macb or gem ? 3255 * 3256 * We need to read directly from the hardware here because 3257 * we are early in the probe process and don't have the 3258 * MACB_CAPS_MACB_IS_GEM flag positioned 3259 */ 3260 if (!hw_is_gem(mem, native_io)) 3261 return; 3262 3263 /* bit 0 is never set but queue 0 always exists */ 3264 *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff; 3265 3266 *queue_mask |= 0x1; 3267 3268 for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) 3269 if (*queue_mask & (1 << hw_q)) 3270 (*num_queues)++; 3271 } 3272 3273 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, 3274 struct clk **hclk, struct clk **tx_clk, 3275 struct clk **rx_clk) 3276 { 3277 struct macb_platform_data *pdata; 3278 int err; 3279 3280 pdata = dev_get_platdata(&pdev->dev); 3281 if (pdata) { 3282 *pclk = pdata->pclk; 3283 *hclk = pdata->hclk; 3284 } else { 3285 *pclk = devm_clk_get(&pdev->dev, "pclk"); 3286 *hclk = devm_clk_get(&pdev->dev, "hclk"); 3287 } 3288 3289 if (IS_ERR(*pclk)) { 3290 err = PTR_ERR(*pclk); 3291 dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); 3292 return err; 3293 } 3294 3295 if (IS_ERR(*hclk)) { 3296 err = PTR_ERR(*hclk); 3297 dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); 3298 return err; 3299 } 3300 3301 *tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); 3302 if (IS_ERR(*tx_clk)) 3303 *tx_clk = NULL; 3304 3305 *rx_clk = devm_clk_get(&pdev->dev, "rx_clk"); 3306 if (IS_ERR(*rx_clk)) 3307 *rx_clk = NULL; 3308 3309 err = clk_prepare_enable(*pclk); 3310 if (err) { 3311 dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); 3312 return err; 3313 } 3314 3315 err = clk_prepare_enable(*hclk); 3316 if (err) { 3317 dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err); 3318 goto err_disable_pclk; 3319 } 3320 3321 err = clk_prepare_enable(*tx_clk); 3322 if (err) { 3323 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); 3324 goto err_disable_hclk; 3325 } 3326 3327 err = clk_prepare_enable(*rx_clk); 3328 if (err) { 3329 dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); 3330 goto err_disable_txclk; 3331 } 3332 3333 return 0; 3334 3335 err_disable_txclk: 3336 clk_disable_unprepare(*tx_clk); 3337 3338 err_disable_hclk: 3339 clk_disable_unprepare(*hclk); 3340 3341 err_disable_pclk: 3342 clk_disable_unprepare(*pclk); 3343 3344 return err; 3345 } 3346 3347 static int macb_init(struct platform_device *pdev) 3348 { 3349 struct net_device *dev = platform_get_drvdata(pdev); 3350 unsigned int hw_q, q; 3351 struct macb *bp = netdev_priv(dev); 3352 struct macb_queue *queue; 3353 int err; 3354 u32 val, reg; 3355 3356 bp->tx_ring_size = DEFAULT_TX_RING_SIZE; 3357 bp->rx_ring_size = DEFAULT_RX_RING_SIZE; 3358 3359 /* set the queue register mapping once for all: queue0 has a special 3360 * register mapping but we don't want to test the queue index then 3361 * compute the corresponding register offset at run time. 3362 */ 3363 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { 3364 if (!(bp->queue_mask & (1 << hw_q))) 3365 continue; 3366 3367 queue = &bp->queues[q]; 3368 queue->bp = bp; 3369 netif_napi_add(dev, &queue->napi, macb_poll, 64); 3370 if (hw_q) { 3371 queue->ISR = GEM_ISR(hw_q - 1); 3372 queue->IER = GEM_IER(hw_q - 1); 3373 queue->IDR = GEM_IDR(hw_q - 1); 3374 queue->IMR = GEM_IMR(hw_q - 1); 3375 queue->TBQP = GEM_TBQP(hw_q - 1); 3376 queue->RBQP = GEM_RBQP(hw_q - 1); 3377 queue->RBQS = GEM_RBQS(hw_q - 1); 3378 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3379 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 3380 queue->TBQPH = GEM_TBQPH(hw_q - 1); 3381 queue->RBQPH = GEM_RBQPH(hw_q - 1); 3382 } 3383 #endif 3384 } else { 3385 /* queue0 uses legacy registers */ 3386 queue->ISR = MACB_ISR; 3387 queue->IER = MACB_IER; 3388 queue->IDR = MACB_IDR; 3389 queue->IMR = MACB_IMR; 3390 queue->TBQP = MACB_TBQP; 3391 queue->RBQP = MACB_RBQP; 3392 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3393 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 3394 queue->TBQPH = MACB_TBQPH; 3395 queue->RBQPH = MACB_RBQPH; 3396 } 3397 #endif 3398 } 3399 3400 /* get irq: here we use the linux queue index, not the hardware 3401 * queue index. the queue irq definitions in the device tree 3402 * must remove the optional gaps that could exist in the 3403 * hardware queue mask. 3404 */ 3405 queue->irq = platform_get_irq(pdev, q); 3406 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, 3407 IRQF_SHARED, dev->name, queue); 3408 if (err) { 3409 dev_err(&pdev->dev, 3410 "Unable to request IRQ %d (error %d)\n", 3411 queue->irq, err); 3412 return err; 3413 } 3414 3415 INIT_WORK(&queue->tx_error_task, macb_tx_error_task); 3416 q++; 3417 } 3418 3419 dev->netdev_ops = &macb_netdev_ops; 3420 3421 /* setup appropriated routines according to adapter type */ 3422 if (macb_is_gem(bp)) { 3423 bp->max_tx_length = GEM_MAX_TX_LEN; 3424 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; 3425 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; 3426 bp->macbgem_ops.mog_init_rings = gem_init_rings; 3427 bp->macbgem_ops.mog_rx = gem_rx; 3428 dev->ethtool_ops = &gem_ethtool_ops; 3429 } else { 3430 bp->max_tx_length = MACB_MAX_TX_LEN; 3431 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; 3432 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; 3433 bp->macbgem_ops.mog_init_rings = macb_init_rings; 3434 bp->macbgem_ops.mog_rx = macb_rx; 3435 dev->ethtool_ops = &macb_ethtool_ops; 3436 } 3437 3438 /* Set features */ 3439 dev->hw_features = NETIF_F_SG; 3440 3441 /* Check LSO capability */ 3442 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) 3443 dev->hw_features |= MACB_NETIF_LSO; 3444 3445 /* Checksum offload is only available on gem with packet buffer */ 3446 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) 3447 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 3448 if (bp->caps & MACB_CAPS_SG_DISABLED) 3449 dev->hw_features &= ~NETIF_F_SG; 3450 dev->features = dev->hw_features; 3451 3452 /* Check RX Flow Filters support. 3453 * Max Rx flows set by availability of screeners & compare regs: 3454 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs 3455 */ 3456 reg = gem_readl(bp, DCFG8); 3457 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), 3458 GEM_BFEXT(T2SCR, reg)); 3459 if (bp->max_tuples > 0) { 3460 /* also needs one ethtype match to check IPv4 */ 3461 if (GEM_BFEXT(SCR2ETH, reg) > 0) { 3462 /* program this reg now */ 3463 reg = 0; 3464 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg); 3465 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg); 3466 /* Filtering is supported in hw but don't enable it in kernel now */ 3467 dev->hw_features |= NETIF_F_NTUPLE; 3468 /* init Rx flow definitions */ 3469 INIT_LIST_HEAD(&bp->rx_fs_list.list); 3470 bp->rx_fs_list.count = 0; 3471 spin_lock_init(&bp->rx_fs_lock); 3472 } else 3473 bp->max_tuples = 0; 3474 } 3475 3476 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { 3477 val = 0; 3478 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) 3479 val = GEM_BIT(RGMII); 3480 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && 3481 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 3482 val = MACB_BIT(RMII); 3483 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 3484 val = MACB_BIT(MII); 3485 3486 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) 3487 val |= MACB_BIT(CLKEN); 3488 3489 macb_or_gem_writel(bp, USRIO, val); 3490 } 3491 3492 /* Set MII management clock divider */ 3493 val = macb_mdc_clk_div(bp); 3494 val |= macb_dbw(bp); 3495 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) 3496 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 3497 macb_writel(bp, NCFGR, val); 3498 3499 return 0; 3500 } 3501 3502 #if defined(CONFIG_OF) 3503 /* 1518 rounded up */ 3504 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3505 /* max number of receive buffers */ 3506 #define AT91ETHER_MAX_RX_DESCR 9 3507 3508 /* Initialize and start the Receiver and Transmit subsystems */ 3509 static int at91ether_start(struct net_device *dev) 3510 { 3511 struct macb *lp = netdev_priv(dev); 3512 struct macb_queue *q = &lp->queues[0]; 3513 struct macb_dma_desc *desc; 3514 dma_addr_t addr; 3515 u32 ctl; 3516 int i; 3517 3518 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, 3519 (AT91ETHER_MAX_RX_DESCR * 3520 macb_dma_desc_get_size(lp)), 3521 &q->rx_ring_dma, GFP_KERNEL); 3522 if (!q->rx_ring) 3523 return -ENOMEM; 3524 3525 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, 3526 AT91ETHER_MAX_RX_DESCR * 3527 AT91ETHER_MAX_RBUFF_SZ, 3528 &q->rx_buffers_dma, GFP_KERNEL); 3529 if (!q->rx_buffers) { 3530 dma_free_coherent(&lp->pdev->dev, 3531 AT91ETHER_MAX_RX_DESCR * 3532 macb_dma_desc_get_size(lp), 3533 q->rx_ring, q->rx_ring_dma); 3534 q->rx_ring = NULL; 3535 return -ENOMEM; 3536 } 3537 3538 addr = q->rx_buffers_dma; 3539 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { 3540 desc = macb_rx_desc(q, i); 3541 macb_set_addr(lp, desc, addr); 3542 desc->ctrl = 0; 3543 addr += AT91ETHER_MAX_RBUFF_SZ; 3544 } 3545 3546 /* Set the Wrap bit on the last descriptor */ 3547 desc->addr |= MACB_BIT(RX_WRAP); 3548 3549 /* Reset buffer index */ 3550 q->rx_tail = 0; 3551 3552 /* Program address of descriptor list in Rx Buffer Queue register */ 3553 macb_writel(lp, RBQP, q->rx_ring_dma); 3554 3555 /* Enable Receive and Transmit */ 3556 ctl = macb_readl(lp, NCR); 3557 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); 3558 3559 return 0; 3560 } 3561 3562 /* Open the ethernet interface */ 3563 static int at91ether_open(struct net_device *dev) 3564 { 3565 struct macb *lp = netdev_priv(dev); 3566 u32 ctl; 3567 int ret; 3568 3569 /* Clear internal statistics */ 3570 ctl = macb_readl(lp, NCR); 3571 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); 3572 3573 macb_set_hwaddr(lp); 3574 3575 ret = at91ether_start(dev); 3576 if (ret) 3577 return ret; 3578 3579 /* Enable MAC interrupts */ 3580 macb_writel(lp, IER, MACB_BIT(RCOMP) | 3581 MACB_BIT(RXUBR) | 3582 MACB_BIT(ISR_TUND) | 3583 MACB_BIT(ISR_RLE) | 3584 MACB_BIT(TCOMP) | 3585 MACB_BIT(ISR_ROVR) | 3586 MACB_BIT(HRESP)); 3587 3588 /* schedule a link state check */ 3589 phy_start(dev->phydev); 3590 3591 netif_start_queue(dev); 3592 3593 return 0; 3594 } 3595 3596 /* Close the interface */ 3597 static int at91ether_close(struct net_device *dev) 3598 { 3599 struct macb *lp = netdev_priv(dev); 3600 struct macb_queue *q = &lp->queues[0]; 3601 u32 ctl; 3602 3603 /* Disable Receiver and Transmitter */ 3604 ctl = macb_readl(lp, NCR); 3605 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); 3606 3607 /* Disable MAC interrupts */ 3608 macb_writel(lp, IDR, MACB_BIT(RCOMP) | 3609 MACB_BIT(RXUBR) | 3610 MACB_BIT(ISR_TUND) | 3611 MACB_BIT(ISR_RLE) | 3612 MACB_BIT(TCOMP) | 3613 MACB_BIT(ISR_ROVR) | 3614 MACB_BIT(HRESP)); 3615 3616 netif_stop_queue(dev); 3617 3618 dma_free_coherent(&lp->pdev->dev, 3619 AT91ETHER_MAX_RX_DESCR * 3620 macb_dma_desc_get_size(lp), 3621 q->rx_ring, q->rx_ring_dma); 3622 q->rx_ring = NULL; 3623 3624 dma_free_coherent(&lp->pdev->dev, 3625 AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ, 3626 q->rx_buffers, q->rx_buffers_dma); 3627 q->rx_buffers = NULL; 3628 3629 return 0; 3630 } 3631 3632 /* Transmit packet */ 3633 static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb, 3634 struct net_device *dev) 3635 { 3636 struct macb *lp = netdev_priv(dev); 3637 3638 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { 3639 netif_stop_queue(dev); 3640 3641 /* Store packet information (to free when Tx completed) */ 3642 lp->skb = skb; 3643 lp->skb_length = skb->len; 3644 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, 3645 DMA_TO_DEVICE); 3646 if (dma_mapping_error(NULL, lp->skb_physaddr)) { 3647 dev_kfree_skb_any(skb); 3648 dev->stats.tx_dropped++; 3649 netdev_err(dev, "%s: DMA mapping error\n", __func__); 3650 return NETDEV_TX_OK; 3651 } 3652 3653 /* Set address of the data in the Transmit Address register */ 3654 macb_writel(lp, TAR, lp->skb_physaddr); 3655 /* Set length of the packet in the Transmit Control register */ 3656 macb_writel(lp, TCR, skb->len); 3657 3658 } else { 3659 netdev_err(dev, "%s called, but device is busy!\n", __func__); 3660 return NETDEV_TX_BUSY; 3661 } 3662 3663 return NETDEV_TX_OK; 3664 } 3665 3666 /* Extract received frame from buffer descriptors and sent to upper layers. 3667 * (Called from interrupt context) 3668 */ 3669 static void at91ether_rx(struct net_device *dev) 3670 { 3671 struct macb *lp = netdev_priv(dev); 3672 struct macb_queue *q = &lp->queues[0]; 3673 struct macb_dma_desc *desc; 3674 unsigned char *p_recv; 3675 struct sk_buff *skb; 3676 unsigned int pktlen; 3677 3678 desc = macb_rx_desc(q, q->rx_tail); 3679 while (desc->addr & MACB_BIT(RX_USED)) { 3680 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ; 3681 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl); 3682 skb = netdev_alloc_skb(dev, pktlen + 2); 3683 if (skb) { 3684 skb_reserve(skb, 2); 3685 skb_put_data(skb, p_recv, pktlen); 3686 3687 skb->protocol = eth_type_trans(skb, dev); 3688 dev->stats.rx_packets++; 3689 dev->stats.rx_bytes += pktlen; 3690 netif_rx(skb); 3691 } else { 3692 dev->stats.rx_dropped++; 3693 } 3694 3695 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH)) 3696 dev->stats.multicast++; 3697 3698 /* reset ownership bit */ 3699 desc->addr &= ~MACB_BIT(RX_USED); 3700 3701 /* wrap after last buffer */ 3702 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) 3703 q->rx_tail = 0; 3704 else 3705 q->rx_tail++; 3706 3707 desc = macb_rx_desc(q, q->rx_tail); 3708 } 3709 } 3710 3711 /* MAC interrupt handler */ 3712 static irqreturn_t at91ether_interrupt(int irq, void *dev_id) 3713 { 3714 struct net_device *dev = dev_id; 3715 struct macb *lp = netdev_priv(dev); 3716 u32 intstatus, ctl; 3717 3718 /* MAC Interrupt Status register indicates what interrupts are pending. 3719 * It is automatically cleared once read. 3720 */ 3721 intstatus = macb_readl(lp, ISR); 3722 3723 /* Receive complete */ 3724 if (intstatus & MACB_BIT(RCOMP)) 3725 at91ether_rx(dev); 3726 3727 /* Transmit complete */ 3728 if (intstatus & MACB_BIT(TCOMP)) { 3729 /* The TCOM bit is set even if the transmission failed */ 3730 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) 3731 dev->stats.tx_errors++; 3732 3733 if (lp->skb) { 3734 dev_kfree_skb_irq(lp->skb); 3735 lp->skb = NULL; 3736 dma_unmap_single(NULL, lp->skb_physaddr, 3737 lp->skb_length, DMA_TO_DEVICE); 3738 dev->stats.tx_packets++; 3739 dev->stats.tx_bytes += lp->skb_length; 3740 } 3741 netif_wake_queue(dev); 3742 } 3743 3744 /* Work-around for EMAC Errata section 41.3.1 */ 3745 if (intstatus & MACB_BIT(RXUBR)) { 3746 ctl = macb_readl(lp, NCR); 3747 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); 3748 wmb(); 3749 macb_writel(lp, NCR, ctl | MACB_BIT(RE)); 3750 } 3751 3752 if (intstatus & MACB_BIT(ISR_ROVR)) 3753 netdev_err(dev, "ROVR error\n"); 3754 3755 return IRQ_HANDLED; 3756 } 3757 3758 #ifdef CONFIG_NET_POLL_CONTROLLER 3759 static void at91ether_poll_controller(struct net_device *dev) 3760 { 3761 unsigned long flags; 3762 3763 local_irq_save(flags); 3764 at91ether_interrupt(dev->irq, dev); 3765 local_irq_restore(flags); 3766 } 3767 #endif 3768 3769 static const struct net_device_ops at91ether_netdev_ops = { 3770 .ndo_open = at91ether_open, 3771 .ndo_stop = at91ether_close, 3772 .ndo_start_xmit = at91ether_start_xmit, 3773 .ndo_get_stats = macb_get_stats, 3774 .ndo_set_rx_mode = macb_set_rx_mode, 3775 .ndo_set_mac_address = eth_mac_addr, 3776 .ndo_do_ioctl = macb_ioctl, 3777 .ndo_validate_addr = eth_validate_addr, 3778 #ifdef CONFIG_NET_POLL_CONTROLLER 3779 .ndo_poll_controller = at91ether_poll_controller, 3780 #endif 3781 }; 3782 3783 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, 3784 struct clk **hclk, struct clk **tx_clk, 3785 struct clk **rx_clk) 3786 { 3787 int err; 3788 3789 *hclk = NULL; 3790 *tx_clk = NULL; 3791 *rx_clk = NULL; 3792 3793 *pclk = devm_clk_get(&pdev->dev, "ether_clk"); 3794 if (IS_ERR(*pclk)) 3795 return PTR_ERR(*pclk); 3796 3797 err = clk_prepare_enable(*pclk); 3798 if (err) { 3799 dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); 3800 return err; 3801 } 3802 3803 return 0; 3804 } 3805 3806 static int at91ether_init(struct platform_device *pdev) 3807 { 3808 struct net_device *dev = platform_get_drvdata(pdev); 3809 struct macb *bp = netdev_priv(dev); 3810 int err; 3811 u32 reg; 3812 3813 bp->queues[0].bp = bp; 3814 3815 dev->netdev_ops = &at91ether_netdev_ops; 3816 dev->ethtool_ops = &macb_ethtool_ops; 3817 3818 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 3819 0, dev->name, dev); 3820 if (err) 3821 return err; 3822 3823 macb_writel(bp, NCR, 0); 3824 3825 reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG); 3826 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) 3827 reg |= MACB_BIT(RM9200_RMII); 3828 3829 macb_writel(bp, NCFGR, reg); 3830 3831 return 0; 3832 } 3833 3834 static const struct macb_config at91sam9260_config = { 3835 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 3836 .clk_init = macb_clk_init, 3837 .init = macb_init, 3838 }; 3839 3840 static const struct macb_config sama5d3macb_config = { 3841 .caps = MACB_CAPS_SG_DISABLED 3842 | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 3843 .clk_init = macb_clk_init, 3844 .init = macb_init, 3845 }; 3846 3847 static const struct macb_config pc302gem_config = { 3848 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, 3849 .dma_burst_length = 16, 3850 .clk_init = macb_clk_init, 3851 .init = macb_init, 3852 }; 3853 3854 static const struct macb_config sama5d2_config = { 3855 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 3856 .dma_burst_length = 16, 3857 .clk_init = macb_clk_init, 3858 .init = macb_init, 3859 }; 3860 3861 static const struct macb_config sama5d3_config = { 3862 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE 3863 | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO, 3864 .dma_burst_length = 16, 3865 .clk_init = macb_clk_init, 3866 .init = macb_init, 3867 .jumbo_max_len = 10240, 3868 }; 3869 3870 static const struct macb_config sama5d4_config = { 3871 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 3872 .dma_burst_length = 4, 3873 .clk_init = macb_clk_init, 3874 .init = macb_init, 3875 }; 3876 3877 static const struct macb_config emac_config = { 3878 .clk_init = at91ether_clk_init, 3879 .init = at91ether_init, 3880 }; 3881 3882 static const struct macb_config np4_config = { 3883 .caps = MACB_CAPS_USRIO_DISABLED, 3884 .clk_init = macb_clk_init, 3885 .init = macb_init, 3886 }; 3887 3888 static const struct macb_config zynqmp_config = { 3889 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | 3890 MACB_CAPS_JUMBO | 3891 MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH, 3892 .dma_burst_length = 16, 3893 .clk_init = macb_clk_init, 3894 .init = macb_init, 3895 .jumbo_max_len = 10240, 3896 }; 3897 3898 static const struct macb_config zynq_config = { 3899 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF, 3900 .dma_burst_length = 16, 3901 .clk_init = macb_clk_init, 3902 .init = macb_init, 3903 }; 3904 3905 static const struct of_device_id macb_dt_ids[] = { 3906 { .compatible = "cdns,at32ap7000-macb" }, 3907 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, 3908 { .compatible = "cdns,macb" }, 3909 { .compatible = "cdns,np4-macb", .data = &np4_config }, 3910 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, 3911 { .compatible = "cdns,gem", .data = &pc302gem_config }, 3912 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, 3913 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, 3914 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config }, 3915 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, 3916 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, 3917 { .compatible = "cdns,emac", .data = &emac_config }, 3918 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, 3919 { .compatible = "cdns,zynq-gem", .data = &zynq_config }, 3920 { /* sentinel */ } 3921 }; 3922 MODULE_DEVICE_TABLE(of, macb_dt_ids); 3923 #endif /* CONFIG_OF */ 3924 3925 static const struct macb_config default_gem_config = { 3926 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | 3927 MACB_CAPS_JUMBO | 3928 MACB_CAPS_GEM_HAS_PTP, 3929 .dma_burst_length = 16, 3930 .clk_init = macb_clk_init, 3931 .init = macb_init, 3932 .jumbo_max_len = 10240, 3933 }; 3934 3935 static int macb_probe(struct platform_device *pdev) 3936 { 3937 const struct macb_config *macb_config = &default_gem_config; 3938 int (*clk_init)(struct platform_device *, struct clk **, 3939 struct clk **, struct clk **, struct clk **) 3940 = macb_config->clk_init; 3941 int (*init)(struct platform_device *) = macb_config->init; 3942 struct device_node *np = pdev->dev.of_node; 3943 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; 3944 unsigned int queue_mask, num_queues; 3945 struct macb_platform_data *pdata; 3946 bool native_io; 3947 struct phy_device *phydev; 3948 struct net_device *dev; 3949 struct resource *regs; 3950 void __iomem *mem; 3951 const char *mac; 3952 struct macb *bp; 3953 int err, val; 3954 3955 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3956 mem = devm_ioremap_resource(&pdev->dev, regs); 3957 if (IS_ERR(mem)) 3958 return PTR_ERR(mem); 3959 3960 if (np) { 3961 const struct of_device_id *match; 3962 3963 match = of_match_node(macb_dt_ids, np); 3964 if (match && match->data) { 3965 macb_config = match->data; 3966 clk_init = macb_config->clk_init; 3967 init = macb_config->init; 3968 } 3969 } 3970 3971 err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk); 3972 if (err) 3973 return err; 3974 3975 native_io = hw_is_native_io(mem); 3976 3977 macb_probe_queues(mem, native_io, &queue_mask, &num_queues); 3978 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); 3979 if (!dev) { 3980 err = -ENOMEM; 3981 goto err_disable_clocks; 3982 } 3983 3984 dev->base_addr = regs->start; 3985 3986 SET_NETDEV_DEV(dev, &pdev->dev); 3987 3988 bp = netdev_priv(dev); 3989 bp->pdev = pdev; 3990 bp->dev = dev; 3991 bp->regs = mem; 3992 bp->native_io = native_io; 3993 if (native_io) { 3994 bp->macb_reg_readl = hw_readl_native; 3995 bp->macb_reg_writel = hw_writel_native; 3996 } else { 3997 bp->macb_reg_readl = hw_readl; 3998 bp->macb_reg_writel = hw_writel; 3999 } 4000 bp->num_queues = num_queues; 4001 bp->queue_mask = queue_mask; 4002 if (macb_config) 4003 bp->dma_burst_length = macb_config->dma_burst_length; 4004 bp->pclk = pclk; 4005 bp->hclk = hclk; 4006 bp->tx_clk = tx_clk; 4007 bp->rx_clk = rx_clk; 4008 if (macb_config) 4009 bp->jumbo_max_len = macb_config->jumbo_max_len; 4010 4011 bp->wol = 0; 4012 if (of_get_property(np, "magic-packet", NULL)) 4013 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; 4014 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); 4015 4016 spin_lock_init(&bp->lock); 4017 4018 /* setup capabilities */ 4019 macb_configure_caps(bp, macb_config); 4020 4021 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4022 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { 4023 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); 4024 bp->hw_dma_cap |= HW_DMA_CAP_64B; 4025 } 4026 #endif 4027 platform_set_drvdata(pdev, dev); 4028 4029 dev->irq = platform_get_irq(pdev, 0); 4030 if (dev->irq < 0) { 4031 err = dev->irq; 4032 goto err_out_free_netdev; 4033 } 4034 4035 /* MTU range: 68 - 1500 or 10240 */ 4036 dev->min_mtu = GEM_MTU_MIN_SIZE; 4037 if (bp->caps & MACB_CAPS_JUMBO) 4038 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN; 4039 else 4040 dev->max_mtu = ETH_DATA_LEN; 4041 4042 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) { 4043 val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10)); 4044 if (val) 4045 bp->rx_bd_rd_prefetch = (2 << (val - 1)) * 4046 macb_dma_desc_get_size(bp); 4047 4048 val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10)); 4049 if (val) 4050 bp->tx_bd_rd_prefetch = (2 << (val - 1)) * 4051 macb_dma_desc_get_size(bp); 4052 } 4053 4054 mac = of_get_mac_address(np); 4055 if (mac) { 4056 ether_addr_copy(bp->dev->dev_addr, mac); 4057 } else { 4058 err = of_get_nvmem_mac_address(np, bp->dev->dev_addr); 4059 if (err) { 4060 if (err == -EPROBE_DEFER) 4061 goto err_out_free_netdev; 4062 macb_get_hwaddr(bp); 4063 } 4064 } 4065 4066 err = of_get_phy_mode(np); 4067 if (err < 0) { 4068 pdata = dev_get_platdata(&pdev->dev); 4069 if (pdata && pdata->is_rmii) 4070 bp->phy_interface = PHY_INTERFACE_MODE_RMII; 4071 else 4072 bp->phy_interface = PHY_INTERFACE_MODE_MII; 4073 } else { 4074 bp->phy_interface = err; 4075 } 4076 4077 /* IP specific init */ 4078 err = init(pdev); 4079 if (err) 4080 goto err_out_free_netdev; 4081 4082 err = macb_mii_init(bp); 4083 if (err) 4084 goto err_out_free_netdev; 4085 4086 phydev = dev->phydev; 4087 4088 netif_carrier_off(dev); 4089 4090 err = register_netdev(dev); 4091 if (err) { 4092 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 4093 goto err_out_unregister_mdio; 4094 } 4095 4096 tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task, 4097 (unsigned long)bp); 4098 4099 phy_attached_info(phydev); 4100 4101 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", 4102 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), 4103 dev->base_addr, dev->irq, dev->dev_addr); 4104 4105 return 0; 4106 4107 err_out_unregister_mdio: 4108 phy_disconnect(dev->phydev); 4109 mdiobus_unregister(bp->mii_bus); 4110 of_node_put(bp->phy_node); 4111 if (np && of_phy_is_fixed_link(np)) 4112 of_phy_deregister_fixed_link(np); 4113 mdiobus_free(bp->mii_bus); 4114 4115 err_out_free_netdev: 4116 free_netdev(dev); 4117 4118 err_disable_clocks: 4119 clk_disable_unprepare(tx_clk); 4120 clk_disable_unprepare(hclk); 4121 clk_disable_unprepare(pclk); 4122 clk_disable_unprepare(rx_clk); 4123 4124 return err; 4125 } 4126 4127 static int macb_remove(struct platform_device *pdev) 4128 { 4129 struct net_device *dev; 4130 struct macb *bp; 4131 struct device_node *np = pdev->dev.of_node; 4132 4133 dev = platform_get_drvdata(pdev); 4134 4135 if (dev) { 4136 bp = netdev_priv(dev); 4137 if (dev->phydev) 4138 phy_disconnect(dev->phydev); 4139 mdiobus_unregister(bp->mii_bus); 4140 if (np && of_phy_is_fixed_link(np)) 4141 of_phy_deregister_fixed_link(np); 4142 dev->phydev = NULL; 4143 mdiobus_free(bp->mii_bus); 4144 4145 unregister_netdev(dev); 4146 clk_disable_unprepare(bp->tx_clk); 4147 clk_disable_unprepare(bp->hclk); 4148 clk_disable_unprepare(bp->pclk); 4149 clk_disable_unprepare(bp->rx_clk); 4150 of_node_put(bp->phy_node); 4151 free_netdev(dev); 4152 } 4153 4154 return 0; 4155 } 4156 4157 static int __maybe_unused macb_suspend(struct device *dev) 4158 { 4159 struct net_device *netdev = dev_get_drvdata(dev); 4160 struct macb *bp = netdev_priv(netdev); 4161 4162 netif_carrier_off(netdev); 4163 netif_device_detach(netdev); 4164 4165 if (bp->wol & MACB_WOL_ENABLED) { 4166 macb_writel(bp, IER, MACB_BIT(WOL)); 4167 macb_writel(bp, WOL, MACB_BIT(MAG)); 4168 enable_irq_wake(bp->queues[0].irq); 4169 } else { 4170 clk_disable_unprepare(bp->tx_clk); 4171 clk_disable_unprepare(bp->hclk); 4172 clk_disable_unprepare(bp->pclk); 4173 clk_disable_unprepare(bp->rx_clk); 4174 } 4175 4176 return 0; 4177 } 4178 4179 static int __maybe_unused macb_resume(struct device *dev) 4180 { 4181 struct net_device *netdev = dev_get_drvdata(dev); 4182 struct macb *bp = netdev_priv(netdev); 4183 4184 if (bp->wol & MACB_WOL_ENABLED) { 4185 macb_writel(bp, IDR, MACB_BIT(WOL)); 4186 macb_writel(bp, WOL, 0); 4187 disable_irq_wake(bp->queues[0].irq); 4188 } else { 4189 clk_prepare_enable(bp->pclk); 4190 clk_prepare_enable(bp->hclk); 4191 clk_prepare_enable(bp->tx_clk); 4192 clk_prepare_enable(bp->rx_clk); 4193 } 4194 4195 netif_device_attach(netdev); 4196 4197 return 0; 4198 } 4199 4200 static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume); 4201 4202 static struct platform_driver macb_driver = { 4203 .probe = macb_probe, 4204 .remove = macb_remove, 4205 .driver = { 4206 .name = "macb", 4207 .of_match_table = of_match_ptr(macb_dt_ids), 4208 .pm = &macb_pm_ops, 4209 }, 4210 }; 4211 4212 module_platform_driver(macb_driver); 4213 4214 MODULE_LICENSE("GPL"); 4215 MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); 4216 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 4217 MODULE_ALIAS("platform:macb"); 4218