1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Cadence MACB/GEM Ethernet Controller driver 4 * 5 * Copyright (C) 2004-2006 Atmel Corporation 6 */ 7 8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 #include <linux/clk.h> 10 #include <linux/clk-provider.h> 11 #include <linux/crc32.h> 12 #include <linux/module.h> 13 #include <linux/moduleparam.h> 14 #include <linux/kernel.h> 15 #include <linux/types.h> 16 #include <linux/circ_buf.h> 17 #include <linux/slab.h> 18 #include <linux/init.h> 19 #include <linux/io.h> 20 #include <linux/gpio.h> 21 #include <linux/gpio/consumer.h> 22 #include <linux/interrupt.h> 23 #include <linux/netdevice.h> 24 #include <linux/etherdevice.h> 25 #include <linux/dma-mapping.h> 26 #include <linux/platform_data/macb.h> 27 #include <linux/platform_device.h> 28 #include <linux/phylink.h> 29 #include <linux/of.h> 30 #include <linux/of_device.h> 31 #include <linux/of_gpio.h> 32 #include <linux/of_mdio.h> 33 #include <linux/of_net.h> 34 #include <linux/ip.h> 35 #include <linux/udp.h> 36 #include <linux/tcp.h> 37 #include <linux/iopoll.h> 38 #include <linux/pm_runtime.h> 39 #include "macb.h" 40 41 /* This structure is only used for MACB on SiFive FU540 devices */ 42 struct sifive_fu540_macb_mgmt { 43 void __iomem *reg; 44 unsigned long rate; 45 struct clk_hw hw; 46 }; 47 48 #define MACB_RX_BUFFER_SIZE 128 49 #define RX_BUFFER_MULTIPLE 64 /* bytes */ 50 51 #define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */ 52 #define MIN_RX_RING_SIZE 64 53 #define MAX_RX_RING_SIZE 8192 54 #define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \ 55 * (bp)->rx_ring_size) 56 57 #define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */ 58 #define MIN_TX_RING_SIZE 64 59 #define MAX_TX_RING_SIZE 4096 60 #define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \ 61 * (bp)->tx_ring_size) 62 63 /* level of occupied TX descriptors under which we wake up TX process */ 64 #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4) 65 66 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR)) 67 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 68 | MACB_BIT(ISR_RLE) \ 69 | MACB_BIT(TXERR)) 70 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP) \ 71 | MACB_BIT(TXUBR)) 72 73 /* Max length of transmit frame must be a multiple of 8 bytes */ 74 #define MACB_TX_LEN_ALIGN 8 75 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) 76 /* Limit maximum TX length as per Cadence TSO errata. This is to avoid a 77 * false amba_error in TX path from the DMA assuming there is not enough 78 * space in the SRAM (16KB) even when there is. 79 */ 80 #define GEM_MAX_TX_LEN (unsigned int)(0x3FC0) 81 82 #define GEM_MTU_MIN_SIZE ETH_MIN_MTU 83 #define MACB_NETIF_LSO NETIF_F_TSO 84 85 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 86 #define MACB_WOL_ENABLED (0x1 << 1) 87 88 /* Graceful stop timeouts in us. We should allow up to 89 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) 90 */ 91 #define MACB_HALT_TIMEOUT 1230 92 93 #define MACB_PM_TIMEOUT 100 /* ms */ 94 95 #define MACB_MDIO_TIMEOUT 1000000 /* in usecs */ 96 97 /* DMA buffer descriptor might be different size 98 * depends on hardware configuration: 99 * 100 * 1. dma address width 32 bits: 101 * word 1: 32 bit address of Data Buffer 102 * word 2: control 103 * 104 * 2. dma address width 64 bits: 105 * word 1: 32 bit address of Data Buffer 106 * word 2: control 107 * word 3: upper 32 bit address of Data Buffer 108 * word 4: unused 109 * 110 * 3. 
dma address width 32 bits with hardware timestamping: 111 * word 1: 32 bit address of Data Buffer 112 * word 2: control 113 * word 3: timestamp word 1 114 * word 4: timestamp word 2 115 * 116 * 4. dma address width 64 bits with hardware timestamping: 117 * word 1: 32 bit address of Data Buffer 118 * word 2: control 119 * word 3: upper 32 bit address of Data Buffer 120 * word 4: unused 121 * word 5: timestamp word 1 122 * word 6: timestamp word 2 123 */ 124 static unsigned int macb_dma_desc_get_size(struct macb *bp) 125 { 126 #ifdef MACB_EXT_DESC 127 unsigned int desc_size; 128 129 switch (bp->hw_dma_cap) { 130 case HW_DMA_CAP_64B: 131 desc_size = sizeof(struct macb_dma_desc) 132 + sizeof(struct macb_dma_desc_64); 133 break; 134 case HW_DMA_CAP_PTP: 135 desc_size = sizeof(struct macb_dma_desc) 136 + sizeof(struct macb_dma_desc_ptp); 137 break; 138 case HW_DMA_CAP_64B_PTP: 139 desc_size = sizeof(struct macb_dma_desc) 140 + sizeof(struct macb_dma_desc_64) 141 + sizeof(struct macb_dma_desc_ptp); 142 break; 143 default: 144 desc_size = sizeof(struct macb_dma_desc); 145 } 146 return desc_size; 147 #endif 148 return sizeof(struct macb_dma_desc); 149 } 150 151 static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx) 152 { 153 #ifdef MACB_EXT_DESC 154 switch (bp->hw_dma_cap) { 155 case HW_DMA_CAP_64B: 156 case HW_DMA_CAP_PTP: 157 desc_idx <<= 1; 158 break; 159 case HW_DMA_CAP_64B_PTP: 160 desc_idx *= 3; 161 break; 162 default: 163 break; 164 } 165 #endif 166 return desc_idx; 167 } 168 169 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 170 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc) 171 { 172 return (struct macb_dma_desc_64 *)((void *)desc 173 + sizeof(struct macb_dma_desc)); 174 } 175 #endif 176 177 /* Ring buffer accessors */ 178 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) 179 { 180 return index & (bp->tx_ring_size - 1); 181 } 182 183 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, 184 unsigned int index) 185 { 186 index = macb_tx_ring_wrap(queue->bp, index); 187 index = macb_adj_dma_desc_idx(queue->bp, index); 188 return &queue->tx_ring[index]; 189 } 190 191 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, 192 unsigned int index) 193 { 194 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)]; 195 } 196 197 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index) 198 { 199 dma_addr_t offset; 200 201 offset = macb_tx_ring_wrap(queue->bp, index) * 202 macb_dma_desc_get_size(queue->bp); 203 204 return queue->tx_ring_dma + offset; 205 } 206 207 static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index) 208 { 209 return index & (bp->rx_ring_size - 1); 210 } 211 212 static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index) 213 { 214 index = macb_rx_ring_wrap(queue->bp, index); 215 index = macb_adj_dma_desc_idx(queue->bp, index); 216 return &queue->rx_ring[index]; 217 } 218 219 static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index) 220 { 221 return queue->rx_buffers + queue->bp->rx_buffer_size * 222 macb_rx_ring_wrap(queue->bp, index); 223 } 224 225 /* I/O accessors */ 226 static u32 hw_readl_native(struct macb *bp, int offset) 227 { 228 return __raw_readl(bp->regs + offset); 229 } 230 231 static void hw_writel_native(struct macb *bp, int offset, u32 value) 232 { 233 __raw_writel(value, bp->regs + offset); 234 } 235 236 static u32 hw_readl(struct macb *bp, int offset) 237 { 238 return 
readl_relaxed(bp->regs + offset); 239 } 240 241 static void hw_writel(struct macb *bp, int offset, u32 value) 242 { 243 writel_relaxed(value, bp->regs + offset); 244 } 245 246 /* Find the CPU endianness by using the loopback bit of NCR register. When the 247 * CPU is in big endian we need to program swapped mode for management 248 * descriptor access. 249 */ 250 static bool hw_is_native_io(void __iomem *addr) 251 { 252 u32 value = MACB_BIT(LLB); 253 254 __raw_writel(value, addr + MACB_NCR); 255 value = __raw_readl(addr + MACB_NCR); 256 257 /* Write 0 back to disable everything */ 258 __raw_writel(0, addr + MACB_NCR); 259 260 return value == MACB_BIT(LLB); 261 } 262 263 static bool hw_is_gem(void __iomem *addr, bool native_io) 264 { 265 u32 id; 266 267 if (native_io) 268 id = __raw_readl(addr + MACB_MID); 269 else 270 id = readl_relaxed(addr + MACB_MID); 271 272 return MACB_BFEXT(IDNUM, id) >= 0x2; 273 } 274 275 static void macb_set_hwaddr(struct macb *bp) 276 { 277 u32 bottom; 278 u16 top; 279 280 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); 281 macb_or_gem_writel(bp, SA1B, bottom); 282 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); 283 macb_or_gem_writel(bp, SA1T, top); 284 285 /* Clear unused address register sets */ 286 macb_or_gem_writel(bp, SA2B, 0); 287 macb_or_gem_writel(bp, SA2T, 0); 288 macb_or_gem_writel(bp, SA3B, 0); 289 macb_or_gem_writel(bp, SA3T, 0); 290 macb_or_gem_writel(bp, SA4B, 0); 291 macb_or_gem_writel(bp, SA4T, 0); 292 } 293 294 static void macb_get_hwaddr(struct macb *bp) 295 { 296 u32 bottom; 297 u16 top; 298 u8 addr[6]; 299 int i; 300 301 /* Check all 4 address register for valid address */ 302 for (i = 0; i < 4; i++) { 303 bottom = macb_or_gem_readl(bp, SA1B + i * 8); 304 top = macb_or_gem_readl(bp, SA1T + i * 8); 305 306 addr[0] = bottom & 0xff; 307 addr[1] = (bottom >> 8) & 0xff; 308 addr[2] = (bottom >> 16) & 0xff; 309 addr[3] = (bottom >> 24) & 0xff; 310 addr[4] = top & 0xff; 311 addr[5] = (top >> 8) & 0xff; 312 313 if (is_valid_ether_addr(addr)) { 314 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); 315 return; 316 } 317 } 318 319 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); 320 eth_hw_addr_random(bp->dev); 321 } 322 323 static int macb_mdio_wait_for_idle(struct macb *bp) 324 { 325 u32 val; 326 327 return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE), 328 1, MACB_MDIO_TIMEOUT); 329 } 330 331 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 332 { 333 struct macb *bp = bus->priv; 334 int status; 335 336 status = pm_runtime_get_sync(&bp->pdev->dev); 337 if (status < 0) 338 goto mdio_pm_exit; 339 340 status = macb_mdio_wait_for_idle(bp); 341 if (status < 0) 342 goto mdio_read_exit; 343 344 if (regnum & MII_ADDR_C45) { 345 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) 346 | MACB_BF(RW, MACB_MAN_C45_ADDR) 347 | MACB_BF(PHYA, mii_id) 348 | MACB_BF(REGA, (regnum >> 16) & 0x1F) 349 | MACB_BF(DATA, regnum & 0xFFFF) 350 | MACB_BF(CODE, MACB_MAN_C45_CODE))); 351 352 status = macb_mdio_wait_for_idle(bp); 353 if (status < 0) 354 goto mdio_read_exit; 355 356 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) 357 | MACB_BF(RW, MACB_MAN_C45_READ) 358 | MACB_BF(PHYA, mii_id) 359 | MACB_BF(REGA, (regnum >> 16) & 0x1F) 360 | MACB_BF(CODE, MACB_MAN_C45_CODE))); 361 } else { 362 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) 363 | MACB_BF(RW, MACB_MAN_C22_READ) 364 | MACB_BF(PHYA, mii_id) 365 | MACB_BF(REGA, regnum) 366 | MACB_BF(CODE, MACB_MAN_C22_CODE))); 367 } 368 369 status = 
macb_mdio_wait_for_idle(bp); 370 if (status < 0) 371 goto mdio_read_exit; 372 373 status = MACB_BFEXT(DATA, macb_readl(bp, MAN)); 374 375 mdio_read_exit: 376 pm_runtime_mark_last_busy(&bp->pdev->dev); 377 pm_runtime_put_autosuspend(&bp->pdev->dev); 378 mdio_pm_exit: 379 return status; 380 } 381 382 static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 383 u16 value) 384 { 385 struct macb *bp = bus->priv; 386 int status; 387 388 status = pm_runtime_get_sync(&bp->pdev->dev); 389 if (status < 0) 390 goto mdio_pm_exit; 391 392 status = macb_mdio_wait_for_idle(bp); 393 if (status < 0) 394 goto mdio_write_exit; 395 396 if (regnum & MII_ADDR_C45) { 397 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) 398 | MACB_BF(RW, MACB_MAN_C45_ADDR) 399 | MACB_BF(PHYA, mii_id) 400 | MACB_BF(REGA, (regnum >> 16) & 0x1F) 401 | MACB_BF(DATA, regnum & 0xFFFF) 402 | MACB_BF(CODE, MACB_MAN_C45_CODE))); 403 404 status = macb_mdio_wait_for_idle(bp); 405 if (status < 0) 406 goto mdio_write_exit; 407 408 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) 409 | MACB_BF(RW, MACB_MAN_C45_WRITE) 410 | MACB_BF(PHYA, mii_id) 411 | MACB_BF(REGA, (regnum >> 16) & 0x1F) 412 | MACB_BF(CODE, MACB_MAN_C45_CODE) 413 | MACB_BF(DATA, value))); 414 } else { 415 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) 416 | MACB_BF(RW, MACB_MAN_C22_WRITE) 417 | MACB_BF(PHYA, mii_id) 418 | MACB_BF(REGA, regnum) 419 | MACB_BF(CODE, MACB_MAN_C22_CODE) 420 | MACB_BF(DATA, value))); 421 } 422 423 status = macb_mdio_wait_for_idle(bp); 424 if (status < 0) 425 goto mdio_write_exit; 426 427 mdio_write_exit: 428 pm_runtime_mark_last_busy(&bp->pdev->dev); 429 pm_runtime_put_autosuspend(&bp->pdev->dev); 430 mdio_pm_exit: 431 return status; 432 } 433 434 static void macb_init_buffers(struct macb *bp) 435 { 436 struct macb_queue *queue; 437 unsigned int q; 438 439 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 440 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); 441 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 442 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 443 queue_writel(queue, RBQPH, 444 upper_32_bits(queue->rx_ring_dma)); 445 #endif 446 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); 447 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 448 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 449 queue_writel(queue, TBQPH, 450 upper_32_bits(queue->tx_ring_dma)); 451 #endif 452 } 453 } 454 455 /** 456 * macb_set_tx_clk() - Set a clock to a new frequency 457 * @clk Pointer to the clock to change 458 * @rate New frequency in Hz 459 * @dev Pointer to the struct net_device 460 */ 461 static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) 462 { 463 long ferr, rate, rate_rounded; 464 465 if (!clk) 466 return; 467 468 switch (speed) { 469 case SPEED_10: 470 rate = 2500000; 471 break; 472 case SPEED_100: 473 rate = 25000000; 474 break; 475 case SPEED_1000: 476 rate = 125000000; 477 break; 478 default: 479 return; 480 } 481 482 rate_rounded = clk_round_rate(clk, rate); 483 if (rate_rounded < 0) 484 return; 485 486 /* RGMII allows 50 ppm frequency error. Test and warn if this limit 487 * is not satisfied. 
488 */ 489 ferr = abs(rate_rounded - rate); 490 ferr = DIV_ROUND_UP(ferr, rate / 100000); 491 if (ferr > 5) 492 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n", 493 rate); 494 495 if (clk_set_rate(clk, rate_rounded)) 496 netdev_err(dev, "adjusting tx_clk failed.\n"); 497 } 498 499 static void macb_validate(struct phylink_config *config, 500 unsigned long *supported, 501 struct phylink_link_state *state) 502 { 503 struct net_device *ndev = to_net_dev(config->dev); 504 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 505 struct macb *bp = netdev_priv(ndev); 506 507 /* We only support MII, RMII, GMII, RGMII & SGMII. */ 508 if (state->interface != PHY_INTERFACE_MODE_NA && 509 state->interface != PHY_INTERFACE_MODE_MII && 510 state->interface != PHY_INTERFACE_MODE_RMII && 511 state->interface != PHY_INTERFACE_MODE_GMII && 512 state->interface != PHY_INTERFACE_MODE_SGMII && 513 !phy_interface_mode_is_rgmii(state->interface)) { 514 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 515 return; 516 } 517 518 if (!macb_is_gem(bp) && 519 (state->interface == PHY_INTERFACE_MODE_GMII || 520 phy_interface_mode_is_rgmii(state->interface))) { 521 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 522 return; 523 } 524 525 phylink_set_port_modes(mask); 526 phylink_set(mask, Autoneg); 527 phylink_set(mask, Asym_Pause); 528 529 phylink_set(mask, 10baseT_Half); 530 phylink_set(mask, 10baseT_Full); 531 phylink_set(mask, 100baseT_Half); 532 phylink_set(mask, 100baseT_Full); 533 534 if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && 535 (state->interface == PHY_INTERFACE_MODE_NA || 536 state->interface == PHY_INTERFACE_MODE_GMII || 537 state->interface == PHY_INTERFACE_MODE_SGMII || 538 phy_interface_mode_is_rgmii(state->interface))) { 539 phylink_set(mask, 1000baseT_Full); 540 phylink_set(mask, 1000baseX_Full); 541 542 if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF)) 543 phylink_set(mask, 1000baseT_Half); 544 } 545 546 bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); 547 bitmap_and(state->advertising, state->advertising, mask, 548 __ETHTOOL_LINK_MODE_MASK_NBITS); 549 } 550 551 static void macb_mac_pcs_get_state(struct phylink_config *config, 552 struct phylink_link_state *state) 553 { 554 state->link = 0; 555 } 556 557 static void macb_mac_an_restart(struct phylink_config *config) 558 { 559 /* Not supported */ 560 } 561 562 static void macb_mac_config(struct phylink_config *config, unsigned int mode, 563 const struct phylink_link_state *state) 564 { 565 struct net_device *ndev = to_net_dev(config->dev); 566 struct macb *bp = netdev_priv(ndev); 567 unsigned long flags; 568 u32 old_ctrl, ctrl; 569 570 spin_lock_irqsave(&bp->lock, flags); 571 572 old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR); 573 574 /* Clear all the bits we might set later */ 575 ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | MACB_BIT(PAE)); 576 577 if (bp->caps & MACB_CAPS_MACB_IS_EMAC) { 578 if (state->interface == PHY_INTERFACE_MODE_RMII) 579 ctrl |= MACB_BIT(RM9200_RMII); 580 } else { 581 ctrl &= ~(GEM_BIT(GBE) | GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL)); 582 583 /* We do not support MLO_PAUSE_RX yet */ 584 if (state->pause & MLO_PAUSE_TX) 585 ctrl |= MACB_BIT(PAE); 586 587 if (state->interface == PHY_INTERFACE_MODE_SGMII) 588 ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 589 } 590 591 if (state->speed == SPEED_1000) 592 ctrl |= GEM_BIT(GBE); 593 else if (state->speed == SPEED_100) 594 ctrl |= MACB_BIT(SPD); 595 596 if (state->duplex) 597 ctrl |= MACB_BIT(FD); 598 599 /* Apply the new configuration, if any */ 
600 if (old_ctrl ^ ctrl) 601 macb_or_gem_writel(bp, NCFGR, ctrl); 602 603 bp->speed = state->speed; 604 605 spin_unlock_irqrestore(&bp->lock, flags); 606 } 607 608 static void macb_mac_link_down(struct phylink_config *config, unsigned int mode, 609 phy_interface_t interface) 610 { 611 struct net_device *ndev = to_net_dev(config->dev); 612 struct macb *bp = netdev_priv(ndev); 613 struct macb_queue *queue; 614 unsigned int q; 615 u32 ctrl; 616 617 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) 618 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 619 queue_writel(queue, IDR, 620 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); 621 622 /* Disable Rx and Tx */ 623 ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE)); 624 macb_writel(bp, NCR, ctrl); 625 626 netif_tx_stop_all_queues(ndev); 627 } 628 629 static void macb_mac_link_up(struct phylink_config *config, unsigned int mode, 630 phy_interface_t interface, struct phy_device *phy) 631 { 632 struct net_device *ndev = to_net_dev(config->dev); 633 struct macb *bp = netdev_priv(ndev); 634 struct macb_queue *queue; 635 unsigned int q; 636 637 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { 638 macb_set_tx_clk(bp->tx_clk, bp->speed, ndev); 639 640 /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down 641 * cleared the pipeline and control registers. 642 */ 643 bp->macbgem_ops.mog_init_rings(bp); 644 macb_init_buffers(bp); 645 646 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 647 queue_writel(queue, IER, 648 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); 649 } 650 651 /* Enable Rx and Tx */ 652 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); 653 654 netif_tx_wake_all_queues(ndev); 655 } 656 657 static const struct phylink_mac_ops macb_phylink_ops = { 658 .validate = macb_validate, 659 .mac_pcs_get_state = macb_mac_pcs_get_state, 660 .mac_an_restart = macb_mac_an_restart, 661 .mac_config = macb_mac_config, 662 .mac_link_down = macb_mac_link_down, 663 .mac_link_up = macb_mac_link_up, 664 }; 665 666 static bool macb_phy_handle_exists(struct device_node *dn) 667 { 668 dn = of_parse_phandle(dn, "phy-handle", 0); 669 of_node_put(dn); 670 return dn != NULL; 671 } 672 673 static int macb_phylink_connect(struct macb *bp) 674 { 675 struct device_node *dn = bp->pdev->dev.of_node; 676 struct net_device *dev = bp->dev; 677 struct phy_device *phydev; 678 int ret; 679 680 if (dn) 681 ret = phylink_of_phy_connect(bp->phylink, dn, 0); 682 683 if (!dn || (ret && !macb_phy_handle_exists(dn))) { 684 phydev = phy_find_first(bp->mii_bus); 685 if (!phydev) { 686 netdev_err(dev, "no PHY found\n"); 687 return -ENXIO; 688 } 689 690 /* attach the mac to the phy */ 691 ret = phylink_connect_phy(bp->phylink, phydev); 692 } 693 694 if (ret) { 695 netdev_err(dev, "Could not attach PHY (%d)\n", ret); 696 return ret; 697 } 698 699 phylink_start(bp->phylink); 700 701 return 0; 702 } 703 704 /* based on au1000_eth. 
c*/ 705 static int macb_mii_probe(struct net_device *dev) 706 { 707 struct macb *bp = netdev_priv(dev); 708 709 bp->phylink_config.dev = &dev->dev; 710 bp->phylink_config.type = PHYLINK_NETDEV; 711 712 bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode, 713 bp->phy_interface, &macb_phylink_ops); 714 if (IS_ERR(bp->phylink)) { 715 netdev_err(dev, "Could not create a phylink instance (%ld)\n", 716 PTR_ERR(bp->phylink)); 717 return PTR_ERR(bp->phylink); 718 } 719 720 return 0; 721 } 722 723 static int macb_mdiobus_register(struct macb *bp) 724 { 725 struct device_node *child, *np = bp->pdev->dev.of_node; 726 727 /* Only create the PHY from the device tree if at least one PHY is 728 * described. Otherwise scan the entire MDIO bus. We do this to support 729 * old device tree that did not follow the best practices and did not 730 * describe their network PHYs. 731 */ 732 for_each_available_child_of_node(np, child) 733 if (of_mdiobus_child_is_phy(child)) { 734 /* The loop increments the child refcount, 735 * decrement it before returning. 736 */ 737 of_node_put(child); 738 739 return of_mdiobus_register(bp->mii_bus, np); 740 } 741 742 return mdiobus_register(bp->mii_bus); 743 } 744 745 static int macb_mii_init(struct macb *bp) 746 { 747 int err = -ENXIO; 748 749 /* Enable management port */ 750 macb_writel(bp, NCR, MACB_BIT(MPE)); 751 752 bp->mii_bus = mdiobus_alloc(); 753 if (!bp->mii_bus) { 754 err = -ENOMEM; 755 goto err_out; 756 } 757 758 bp->mii_bus->name = "MACB_mii_bus"; 759 bp->mii_bus->read = &macb_mdio_read; 760 bp->mii_bus->write = &macb_mdio_write; 761 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 762 bp->pdev->name, bp->pdev->id); 763 bp->mii_bus->priv = bp; 764 bp->mii_bus->parent = &bp->pdev->dev; 765 766 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); 767 768 err = macb_mdiobus_register(bp); 769 if (err) 770 goto err_out_free_mdiobus; 771 772 err = macb_mii_probe(bp->dev); 773 if (err) 774 goto err_out_unregister_bus; 775 776 return 0; 777 778 err_out_unregister_bus: 779 mdiobus_unregister(bp->mii_bus); 780 err_out_free_mdiobus: 781 mdiobus_free(bp->mii_bus); 782 err_out: 783 return err; 784 } 785 786 static void macb_update_stats(struct macb *bp) 787 { 788 u32 *p = &bp->hw_stats.macb.rx_pause_frames; 789 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; 790 int offset = MACB_PFR; 791 792 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 793 794 for (; p < end; p++, offset += 4) 795 *p += bp->macb_reg_readl(bp, offset); 796 } 797 798 static int macb_halt_tx(struct macb *bp) 799 { 800 unsigned long halt_time, timeout; 801 u32 status; 802 803 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); 804 805 timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT); 806 do { 807 halt_time = jiffies; 808 status = macb_readl(bp, TSR); 809 if (!(status & MACB_BIT(TGO))) 810 return 0; 811 812 udelay(250); 813 } while (time_before(halt_time, timeout)); 814 815 return -ETIMEDOUT; 816 } 817 818 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) 819 { 820 if (tx_skb->mapping) { 821 if (tx_skb->mapped_as_page) 822 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, 823 tx_skb->size, DMA_TO_DEVICE); 824 else 825 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, 826 tx_skb->size, DMA_TO_DEVICE); 827 tx_skb->mapping = 0; 828 } 829 830 if (tx_skb->skb) { 831 dev_kfree_skb_any(tx_skb->skb); 832 tx_skb->skb = NULL; 833 } 834 } 835 836 static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) 837 { 838 #ifdef 
CONFIG_ARCH_DMA_ADDR_T_64BIT 839 struct macb_dma_desc_64 *desc_64; 840 841 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 842 desc_64 = macb_64b_desc(bp, desc); 843 desc_64->addrh = upper_32_bits(addr); 844 /* The low bits of RX address contain the RX_USED bit, clearing 845 * of which allows packet RX. Make sure the high bits are also 846 * visible to HW at that point. 847 */ 848 dma_wmb(); 849 } 850 #endif 851 desc->addr = lower_32_bits(addr); 852 } 853 854 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) 855 { 856 dma_addr_t addr = 0; 857 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 858 struct macb_dma_desc_64 *desc_64; 859 860 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 861 desc_64 = macb_64b_desc(bp, desc); 862 addr = ((u64)(desc_64->addrh) << 32); 863 } 864 #endif 865 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 866 return addr; 867 } 868 869 static void macb_tx_error_task(struct work_struct *work) 870 { 871 struct macb_queue *queue = container_of(work, struct macb_queue, 872 tx_error_task); 873 struct macb *bp = queue->bp; 874 struct macb_tx_skb *tx_skb; 875 struct macb_dma_desc *desc; 876 struct sk_buff *skb; 877 unsigned int tail; 878 unsigned long flags; 879 880 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", 881 (unsigned int)(queue - bp->queues), 882 queue->tx_tail, queue->tx_head); 883 884 /* Prevent the queue IRQ handlers from running: each of them may call 885 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue(). 886 * As explained below, we have to halt the transmission before updating 887 * TBQP registers so we call netif_tx_stop_all_queues() to notify the 888 * network engine about the macb/gem being halted. 889 */ 890 spin_lock_irqsave(&bp->lock, flags); 891 892 /* Make sure nobody is trying to queue up new packets */ 893 netif_tx_stop_all_queues(bp->dev); 894 895 /* Stop transmission now 896 * (in case we have just queued new packets) 897 * macb/gem must be halted to write TBQP register 898 */ 899 if (macb_halt_tx(bp)) 900 /* Just complain for now, reinitializing TX path can be good */ 901 netdev_err(bp->dev, "BUG: halt tx timed out\n"); 902 903 /* Treat frames in TX queue including the ones that caused the error. 904 * Free transmit buffers in upper layer. 905 */ 906 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { 907 u32 ctrl; 908 909 desc = macb_tx_desc(queue, tail); 910 ctrl = desc->ctrl; 911 tx_skb = macb_tx_skb(queue, tail); 912 skb = tx_skb->skb; 913 914 if (ctrl & MACB_BIT(TX_USED)) { 915 /* skb is set for the last buffer of the frame */ 916 while (!skb) { 917 macb_tx_unmap(bp, tx_skb); 918 tail++; 919 tx_skb = macb_tx_skb(queue, tail); 920 skb = tx_skb->skb; 921 } 922 923 /* ctrl still refers to the first buffer descriptor 924 * since it's the only one written back by the hardware 925 */ 926 if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) { 927 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", 928 macb_tx_ring_wrap(bp, tail), 929 skb->data); 930 bp->dev->stats.tx_packets++; 931 queue->stats.tx_packets++; 932 bp->dev->stats.tx_bytes += skb->len; 933 queue->stats.tx_bytes += skb->len; 934 } 935 } else { 936 /* "Buffers exhausted mid-frame" errors may only happen 937 * if the driver is buggy, so complain loudly about 938 * those. Statistics are updated by hardware. 
939 */ 940 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) 941 netdev_err(bp->dev, 942 "BUG: TX buffers exhausted mid-frame\n"); 943 944 desc->ctrl = ctrl | MACB_BIT(TX_USED); 945 } 946 947 macb_tx_unmap(bp, tx_skb); 948 } 949 950 /* Set end of TX queue */ 951 desc = macb_tx_desc(queue, 0); 952 macb_set_addr(bp, desc, 0); 953 desc->ctrl = MACB_BIT(TX_USED); 954 955 /* Make descriptor updates visible to hardware */ 956 wmb(); 957 958 /* Reinitialize the TX desc queue */ 959 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); 960 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 961 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 962 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); 963 #endif 964 /* Make TX ring reflect state of hardware */ 965 queue->tx_head = 0; 966 queue->tx_tail = 0; 967 968 /* Housework before enabling TX IRQ */ 969 macb_writel(bp, TSR, macb_readl(bp, TSR)); 970 queue_writel(queue, IER, MACB_TX_INT_FLAGS); 971 972 /* Now we are ready to start transmission again */ 973 netif_tx_start_all_queues(bp->dev); 974 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 975 976 spin_unlock_irqrestore(&bp->lock, flags); 977 } 978 979 static void macb_tx_interrupt(struct macb_queue *queue) 980 { 981 unsigned int tail; 982 unsigned int head; 983 u32 status; 984 struct macb *bp = queue->bp; 985 u16 queue_index = queue - bp->queues; 986 987 status = macb_readl(bp, TSR); 988 macb_writel(bp, TSR, status); 989 990 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 991 queue_writel(queue, ISR, MACB_BIT(TCOMP)); 992 993 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", 994 (unsigned long)status); 995 996 head = queue->tx_head; 997 for (tail = queue->tx_tail; tail != head; tail++) { 998 struct macb_tx_skb *tx_skb; 999 struct sk_buff *skb; 1000 struct macb_dma_desc *desc; 1001 u32 ctrl; 1002 1003 desc = macb_tx_desc(queue, tail); 1004 1005 /* Make hw descriptor updates visible to CPU */ 1006 rmb(); 1007 1008 ctrl = desc->ctrl; 1009 1010 /* TX_USED bit is only set by hardware on the very first buffer 1011 * descriptor of the transmitted frame. 1012 */ 1013 if (!(ctrl & MACB_BIT(TX_USED))) 1014 break; 1015 1016 /* Process all buffers of the current transmitted frame */ 1017 for (;; tail++) { 1018 tx_skb = macb_tx_skb(queue, tail); 1019 skb = tx_skb->skb; 1020 1021 /* First, update TX stats if needed */ 1022 if (skb) { 1023 if (unlikely(skb_shinfo(skb)->tx_flags & 1024 SKBTX_HW_TSTAMP) && 1025 gem_ptp_do_txstamp(queue, skb, desc) == 0) { 1026 /* skb now belongs to timestamp buffer 1027 * and will be removed later 1028 */ 1029 tx_skb->skb = NULL; 1030 } 1031 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", 1032 macb_tx_ring_wrap(bp, tail), 1033 skb->data); 1034 bp->dev->stats.tx_packets++; 1035 queue->stats.tx_packets++; 1036 bp->dev->stats.tx_bytes += skb->len; 1037 queue->stats.tx_bytes += skb->len; 1038 } 1039 1040 /* Now we can safely release resources */ 1041 macb_tx_unmap(bp, tx_skb); 1042 1043 /* skb is set only for the last buffer of the frame. 1044 * WARNING: at this point skb has been freed by 1045 * macb_tx_unmap(). 
1046 */ 1047 if (skb) 1048 break; 1049 } 1050 } 1051 1052 queue->tx_tail = tail; 1053 if (__netif_subqueue_stopped(bp->dev, queue_index) && 1054 CIRC_CNT(queue->tx_head, queue->tx_tail, 1055 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) 1056 netif_wake_subqueue(bp->dev, queue_index); 1057 } 1058 1059 static void gem_rx_refill(struct macb_queue *queue) 1060 { 1061 unsigned int entry; 1062 struct sk_buff *skb; 1063 dma_addr_t paddr; 1064 struct macb *bp = queue->bp; 1065 struct macb_dma_desc *desc; 1066 1067 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail, 1068 bp->rx_ring_size) > 0) { 1069 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); 1070 1071 /* Make hw descriptor updates visible to CPU */ 1072 rmb(); 1073 1074 queue->rx_prepared_head++; 1075 desc = macb_rx_desc(queue, entry); 1076 1077 if (!queue->rx_skbuff[entry]) { 1078 /* allocate sk_buff for this free entry in ring */ 1079 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); 1080 if (unlikely(!skb)) { 1081 netdev_err(bp->dev, 1082 "Unable to allocate sk_buff\n"); 1083 break; 1084 } 1085 1086 /* now fill corresponding descriptor entry */ 1087 paddr = dma_map_single(&bp->pdev->dev, skb->data, 1088 bp->rx_buffer_size, 1089 DMA_FROM_DEVICE); 1090 if (dma_mapping_error(&bp->pdev->dev, paddr)) { 1091 dev_kfree_skb(skb); 1092 break; 1093 } 1094 1095 queue->rx_skbuff[entry] = skb; 1096 1097 if (entry == bp->rx_ring_size - 1) 1098 paddr |= MACB_BIT(RX_WRAP); 1099 desc->ctrl = 0; 1100 /* Setting addr clears RX_USED and allows reception, 1101 * make sure ctrl is cleared first to avoid a race. 1102 */ 1103 dma_wmb(); 1104 macb_set_addr(bp, desc, paddr); 1105 1106 /* properly align Ethernet header */ 1107 skb_reserve(skb, NET_IP_ALIGN); 1108 } else { 1109 desc->ctrl = 0; 1110 dma_wmb(); 1111 desc->addr &= ~MACB_BIT(RX_USED); 1112 } 1113 } 1114 1115 /* Make descriptor updates visible to hardware */ 1116 wmb(); 1117 1118 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", 1119 queue, queue->rx_prepared_head, queue->rx_tail); 1120 } 1121 1122 /* Mark DMA descriptors from begin up to and not including end as unused */ 1123 static void discard_partial_frame(struct macb_queue *queue, unsigned int begin, 1124 unsigned int end) 1125 { 1126 unsigned int frag; 1127 1128 for (frag = begin; frag != end; frag++) { 1129 struct macb_dma_desc *desc = macb_rx_desc(queue, frag); 1130 1131 desc->addr &= ~MACB_BIT(RX_USED); 1132 } 1133 1134 /* Make descriptor updates visible to hardware */ 1135 wmb(); 1136 1137 /* When this happens, the hardware stats registers for 1138 * whatever caused this is updated, so we don't have to record 1139 * anything. 1140 */ 1141 } 1142 1143 static int gem_rx(struct macb_queue *queue, struct napi_struct *napi, 1144 int budget) 1145 { 1146 struct macb *bp = queue->bp; 1147 unsigned int len; 1148 unsigned int entry; 1149 struct sk_buff *skb; 1150 struct macb_dma_desc *desc; 1151 int count = 0; 1152 1153 while (count < budget) { 1154 u32 ctrl; 1155 dma_addr_t addr; 1156 bool rxused; 1157 1158 entry = macb_rx_ring_wrap(bp, queue->rx_tail); 1159 desc = macb_rx_desc(queue, entry); 1160 1161 /* Make hw descriptor updates visible to CPU */ 1162 rmb(); 1163 1164 rxused = (desc->addr & MACB_BIT(RX_USED)) ? 
true : false; 1165 addr = macb_get_addr(bp, desc); 1166 1167 if (!rxused) 1168 break; 1169 1170 /* Ensure ctrl is at least as up-to-date as rxused */ 1171 dma_rmb(); 1172 1173 ctrl = desc->ctrl; 1174 1175 queue->rx_tail++; 1176 count++; 1177 1178 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { 1179 netdev_err(bp->dev, 1180 "not whole frame pointed by descriptor\n"); 1181 bp->dev->stats.rx_dropped++; 1182 queue->stats.rx_dropped++; 1183 break; 1184 } 1185 skb = queue->rx_skbuff[entry]; 1186 if (unlikely(!skb)) { 1187 netdev_err(bp->dev, 1188 "inconsistent Rx descriptor chain\n"); 1189 bp->dev->stats.rx_dropped++; 1190 queue->stats.rx_dropped++; 1191 break; 1192 } 1193 /* now everything is ready for receiving packet */ 1194 queue->rx_skbuff[entry] = NULL; 1195 len = ctrl & bp->rx_frm_len_mask; 1196 1197 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); 1198 1199 skb_put(skb, len); 1200 dma_unmap_single(&bp->pdev->dev, addr, 1201 bp->rx_buffer_size, DMA_FROM_DEVICE); 1202 1203 skb->protocol = eth_type_trans(skb, bp->dev); 1204 skb_checksum_none_assert(skb); 1205 if (bp->dev->features & NETIF_F_RXCSUM && 1206 !(bp->dev->flags & IFF_PROMISC) && 1207 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK) 1208 skb->ip_summed = CHECKSUM_UNNECESSARY; 1209 1210 bp->dev->stats.rx_packets++; 1211 queue->stats.rx_packets++; 1212 bp->dev->stats.rx_bytes += skb->len; 1213 queue->stats.rx_bytes += skb->len; 1214 1215 gem_ptp_do_rxstamp(bp, skb, desc); 1216 1217 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 1218 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 1219 skb->len, skb->csum); 1220 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, 1221 skb_mac_header(skb), 16, true); 1222 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, 1223 skb->data, 32, true); 1224 #endif 1225 1226 napi_gro_receive(napi, skb); 1227 } 1228 1229 gem_rx_refill(queue); 1230 1231 return count; 1232 } 1233 1234 static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi, 1235 unsigned int first_frag, unsigned int last_frag) 1236 { 1237 unsigned int len; 1238 unsigned int frag; 1239 unsigned int offset; 1240 struct sk_buff *skb; 1241 struct macb_dma_desc *desc; 1242 struct macb *bp = queue->bp; 1243 1244 desc = macb_rx_desc(queue, last_frag); 1245 len = desc->ctrl & bp->rx_frm_len_mask; 1246 1247 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", 1248 macb_rx_ring_wrap(bp, first_frag), 1249 macb_rx_ring_wrap(bp, last_frag), len); 1250 1251 /* The ethernet header starts NET_IP_ALIGN bytes into the 1252 * first buffer. Since the header is 14 bytes, this makes the 1253 * payload word-aligned. 1254 * 1255 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy 1256 * the two padding bytes into the skb so that we avoid hitting 1257 * the slowpath in memcpy(), and pull them off afterwards. 
1258 */ 1259 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); 1260 if (!skb) { 1261 bp->dev->stats.rx_dropped++; 1262 for (frag = first_frag; ; frag++) { 1263 desc = macb_rx_desc(queue, frag); 1264 desc->addr &= ~MACB_BIT(RX_USED); 1265 if (frag == last_frag) 1266 break; 1267 } 1268 1269 /* Make descriptor updates visible to hardware */ 1270 wmb(); 1271 1272 return 1; 1273 } 1274 1275 offset = 0; 1276 len += NET_IP_ALIGN; 1277 skb_checksum_none_assert(skb); 1278 skb_put(skb, len); 1279 1280 for (frag = first_frag; ; frag++) { 1281 unsigned int frag_len = bp->rx_buffer_size; 1282 1283 if (offset + frag_len > len) { 1284 if (unlikely(frag != last_frag)) { 1285 dev_kfree_skb_any(skb); 1286 return -1; 1287 } 1288 frag_len = len - offset; 1289 } 1290 skb_copy_to_linear_data_offset(skb, offset, 1291 macb_rx_buffer(queue, frag), 1292 frag_len); 1293 offset += bp->rx_buffer_size; 1294 desc = macb_rx_desc(queue, frag); 1295 desc->addr &= ~MACB_BIT(RX_USED); 1296 1297 if (frag == last_frag) 1298 break; 1299 } 1300 1301 /* Make descriptor updates visible to hardware */ 1302 wmb(); 1303 1304 __skb_pull(skb, NET_IP_ALIGN); 1305 skb->protocol = eth_type_trans(skb, bp->dev); 1306 1307 bp->dev->stats.rx_packets++; 1308 bp->dev->stats.rx_bytes += skb->len; 1309 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 1310 skb->len, skb->csum); 1311 napi_gro_receive(napi, skb); 1312 1313 return 0; 1314 } 1315 1316 static inline void macb_init_rx_ring(struct macb_queue *queue) 1317 { 1318 struct macb *bp = queue->bp; 1319 dma_addr_t addr; 1320 struct macb_dma_desc *desc = NULL; 1321 int i; 1322 1323 addr = queue->rx_buffers_dma; 1324 for (i = 0; i < bp->rx_ring_size; i++) { 1325 desc = macb_rx_desc(queue, i); 1326 macb_set_addr(bp, desc, addr); 1327 desc->ctrl = 0; 1328 addr += bp->rx_buffer_size; 1329 } 1330 desc->addr |= MACB_BIT(RX_WRAP); 1331 queue->rx_tail = 0; 1332 } 1333 1334 static int macb_rx(struct macb_queue *queue, struct napi_struct *napi, 1335 int budget) 1336 { 1337 struct macb *bp = queue->bp; 1338 bool reset_rx_queue = false; 1339 int received = 0; 1340 unsigned int tail; 1341 int first_frag = -1; 1342 1343 for (tail = queue->rx_tail; budget > 0; tail++) { 1344 struct macb_dma_desc *desc = macb_rx_desc(queue, tail); 1345 u32 ctrl; 1346 1347 /* Make hw descriptor updates visible to CPU */ 1348 rmb(); 1349 1350 if (!(desc->addr & MACB_BIT(RX_USED))) 1351 break; 1352 1353 /* Ensure ctrl is at least as up-to-date as addr */ 1354 dma_rmb(); 1355 1356 ctrl = desc->ctrl; 1357 1358 if (ctrl & MACB_BIT(RX_SOF)) { 1359 if (first_frag != -1) 1360 discard_partial_frame(queue, first_frag, tail); 1361 first_frag = tail; 1362 } 1363 1364 if (ctrl & MACB_BIT(RX_EOF)) { 1365 int dropped; 1366 1367 if (unlikely(first_frag == -1)) { 1368 reset_rx_queue = true; 1369 continue; 1370 } 1371 1372 dropped = macb_rx_frame(queue, napi, first_frag, tail); 1373 first_frag = -1; 1374 if (unlikely(dropped < 0)) { 1375 reset_rx_queue = true; 1376 continue; 1377 } 1378 if (!dropped) { 1379 received++; 1380 budget--; 1381 } 1382 } 1383 } 1384 1385 if (unlikely(reset_rx_queue)) { 1386 unsigned long flags; 1387 u32 ctrl; 1388 1389 netdev_err(bp->dev, "RX queue corruption: reset it\n"); 1390 1391 spin_lock_irqsave(&bp->lock, flags); 1392 1393 ctrl = macb_readl(bp, NCR); 1394 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1395 1396 macb_init_rx_ring(queue); 1397 queue_writel(queue, RBQP, queue->rx_ring_dma); 1398 1399 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1400 1401 spin_unlock_irqrestore(&bp->lock, flags); 1402 
return received; 1403 } 1404 1405 if (first_frag != -1) 1406 queue->rx_tail = first_frag; 1407 else 1408 queue->rx_tail = tail; 1409 1410 return received; 1411 } 1412 1413 static int macb_poll(struct napi_struct *napi, int budget) 1414 { 1415 struct macb_queue *queue = container_of(napi, struct macb_queue, napi); 1416 struct macb *bp = queue->bp; 1417 int work_done; 1418 u32 status; 1419 1420 status = macb_readl(bp, RSR); 1421 macb_writel(bp, RSR, status); 1422 1423 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", 1424 (unsigned long)status, budget); 1425 1426 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); 1427 if (work_done < budget) { 1428 napi_complete_done(napi, work_done); 1429 1430 /* Packets received while interrupts were disabled */ 1431 status = macb_readl(bp, RSR); 1432 if (status) { 1433 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1434 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1435 napi_reschedule(napi); 1436 } else { 1437 queue_writel(queue, IER, bp->rx_intr_mask); 1438 } 1439 } 1440 1441 /* TODO: Handle errors */ 1442 1443 return work_done; 1444 } 1445 1446 static void macb_hresp_error_task(unsigned long data) 1447 { 1448 struct macb *bp = (struct macb *)data; 1449 struct net_device *dev = bp->dev; 1450 struct macb_queue *queue = bp->queues; 1451 unsigned int q; 1452 u32 ctrl; 1453 1454 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1455 queue_writel(queue, IDR, bp->rx_intr_mask | 1456 MACB_TX_INT_FLAGS | 1457 MACB_BIT(HRESP)); 1458 } 1459 ctrl = macb_readl(bp, NCR); 1460 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); 1461 macb_writel(bp, NCR, ctrl); 1462 1463 netif_tx_stop_all_queues(dev); 1464 netif_carrier_off(dev); 1465 1466 bp->macbgem_ops.mog_init_rings(bp); 1467 1468 /* Initialize TX and RX buffers */ 1469 macb_init_buffers(bp); 1470 1471 /* Enable interrupts */ 1472 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 1473 queue_writel(queue, IER, 1474 bp->rx_intr_mask | 1475 MACB_TX_INT_FLAGS | 1476 MACB_BIT(HRESP)); 1477 1478 ctrl |= MACB_BIT(RE) | MACB_BIT(TE); 1479 macb_writel(bp, NCR, ctrl); 1480 1481 netif_carrier_on(dev); 1482 netif_tx_start_all_queues(dev); 1483 } 1484 1485 static void macb_tx_restart(struct macb_queue *queue) 1486 { 1487 unsigned int head = queue->tx_head; 1488 unsigned int tail = queue->tx_tail; 1489 struct macb *bp = queue->bp; 1490 1491 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1492 queue_writel(queue, ISR, MACB_BIT(TXUBR)); 1493 1494 if (head == tail) 1495 return; 1496 1497 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 1498 } 1499 1500 static irqreturn_t macb_interrupt(int irq, void *dev_id) 1501 { 1502 struct macb_queue *queue = dev_id; 1503 struct macb *bp = queue->bp; 1504 struct net_device *dev = bp->dev; 1505 u32 status, ctrl; 1506 1507 status = queue_readl(queue, ISR); 1508 1509 if (unlikely(!status)) 1510 return IRQ_NONE; 1511 1512 spin_lock(&bp->lock); 1513 1514 while (status) { 1515 /* close possible race with dev_close */ 1516 if (unlikely(!netif_running(dev))) { 1517 queue_writel(queue, IDR, -1); 1518 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1519 queue_writel(queue, ISR, -1); 1520 break; 1521 } 1522 1523 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", 1524 (unsigned int)(queue - bp->queues), 1525 (unsigned long)status); 1526 1527 if (status & bp->rx_intr_mask) { 1528 /* There's no point taking any more interrupts 1529 * until we have processed the buffers. 
The 1530 * scheduling call may fail if the poll routine 1531 * is already scheduled, so disable interrupts 1532 * now. 1533 */ 1534 queue_writel(queue, IDR, bp->rx_intr_mask); 1535 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1536 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1537 1538 if (napi_schedule_prep(&queue->napi)) { 1539 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); 1540 __napi_schedule(&queue->napi); 1541 } 1542 } 1543 1544 if (unlikely(status & (MACB_TX_ERR_FLAGS))) { 1545 queue_writel(queue, IDR, MACB_TX_INT_FLAGS); 1546 schedule_work(&queue->tx_error_task); 1547 1548 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1549 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS); 1550 1551 break; 1552 } 1553 1554 if (status & MACB_BIT(TCOMP)) 1555 macb_tx_interrupt(queue); 1556 1557 if (status & MACB_BIT(TXUBR)) 1558 macb_tx_restart(queue); 1559 1560 /* Link change detection isn't possible with RMII, so we'll 1561 * add that if/when we get our hands on a full-blown MII PHY. 1562 */ 1563 1564 /* There is a hardware issue under heavy load where DMA can 1565 * stop, this causes endless "used buffer descriptor read" 1566 * interrupts but it can be cleared by re-enabling RX. See 1567 * the at91rm9200 manual, section 41.3.1 or the Zynq manual 1568 * section 16.7.4 for details. RXUBR is only enabled for 1569 * these two versions. 1570 */ 1571 if (status & MACB_BIT(RXUBR)) { 1572 ctrl = macb_readl(bp, NCR); 1573 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1574 wmb(); 1575 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1576 1577 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1578 queue_writel(queue, ISR, MACB_BIT(RXUBR)); 1579 } 1580 1581 if (status & MACB_BIT(ISR_ROVR)) { 1582 /* We missed at least one packet */ 1583 if (macb_is_gem(bp)) 1584 bp->hw_stats.gem.rx_overruns++; 1585 else 1586 bp->hw_stats.macb.rx_overruns++; 1587 1588 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1589 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); 1590 } 1591 1592 if (status & MACB_BIT(HRESP)) { 1593 tasklet_schedule(&bp->hresp_err_tasklet); 1594 netdev_err(dev, "DMA bus error: HRESP not OK\n"); 1595 1596 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1597 queue_writel(queue, ISR, MACB_BIT(HRESP)); 1598 } 1599 status = queue_readl(queue, ISR); 1600 } 1601 1602 spin_unlock(&bp->lock); 1603 1604 return IRQ_HANDLED; 1605 } 1606 1607 #ifdef CONFIG_NET_POLL_CONTROLLER 1608 /* Polling receive - used by netconsole and other diagnostic tools 1609 * to allow network i/o with interrupts disabled. 
1610 */ 1611 static void macb_poll_controller(struct net_device *dev) 1612 { 1613 struct macb *bp = netdev_priv(dev); 1614 struct macb_queue *queue; 1615 unsigned long flags; 1616 unsigned int q; 1617 1618 local_irq_save(flags); 1619 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 1620 macb_interrupt(dev->irq, queue); 1621 local_irq_restore(flags); 1622 } 1623 #endif 1624 1625 static unsigned int macb_tx_map(struct macb *bp, 1626 struct macb_queue *queue, 1627 struct sk_buff *skb, 1628 unsigned int hdrlen) 1629 { 1630 dma_addr_t mapping; 1631 unsigned int len, entry, i, tx_head = queue->tx_head; 1632 struct macb_tx_skb *tx_skb = NULL; 1633 struct macb_dma_desc *desc; 1634 unsigned int offset, size, count = 0; 1635 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; 1636 unsigned int eof = 1, mss_mfs = 0; 1637 u32 ctrl, lso_ctrl = 0, seq_ctrl = 0; 1638 1639 /* LSO */ 1640 if (skb_shinfo(skb)->gso_size != 0) { 1641 if (ip_hdr(skb)->protocol == IPPROTO_UDP) 1642 /* UDP - UFO */ 1643 lso_ctrl = MACB_LSO_UFO_ENABLE; 1644 else 1645 /* TCP - TSO */ 1646 lso_ctrl = MACB_LSO_TSO_ENABLE; 1647 } 1648 1649 /* First, map non-paged data */ 1650 len = skb_headlen(skb); 1651 1652 /* first buffer length */ 1653 size = hdrlen; 1654 1655 offset = 0; 1656 while (len) { 1657 entry = macb_tx_ring_wrap(bp, tx_head); 1658 tx_skb = &queue->tx_skb[entry]; 1659 1660 mapping = dma_map_single(&bp->pdev->dev, 1661 skb->data + offset, 1662 size, DMA_TO_DEVICE); 1663 if (dma_mapping_error(&bp->pdev->dev, mapping)) 1664 goto dma_error; 1665 1666 /* Save info to properly release resources */ 1667 tx_skb->skb = NULL; 1668 tx_skb->mapping = mapping; 1669 tx_skb->size = size; 1670 tx_skb->mapped_as_page = false; 1671 1672 len -= size; 1673 offset += size; 1674 count++; 1675 tx_head++; 1676 1677 size = min(len, bp->max_tx_length); 1678 } 1679 1680 /* Then, map paged data from fragments */ 1681 for (f = 0; f < nr_frags; f++) { 1682 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 1683 1684 len = skb_frag_size(frag); 1685 offset = 0; 1686 while (len) { 1687 size = min(len, bp->max_tx_length); 1688 entry = macb_tx_ring_wrap(bp, tx_head); 1689 tx_skb = &queue->tx_skb[entry]; 1690 1691 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 1692 offset, size, DMA_TO_DEVICE); 1693 if (dma_mapping_error(&bp->pdev->dev, mapping)) 1694 goto dma_error; 1695 1696 /* Save info to properly release resources */ 1697 tx_skb->skb = NULL; 1698 tx_skb->mapping = mapping; 1699 tx_skb->size = size; 1700 tx_skb->mapped_as_page = true; 1701 1702 len -= size; 1703 offset += size; 1704 count++; 1705 tx_head++; 1706 } 1707 } 1708 1709 /* Should never happen */ 1710 if (unlikely(!tx_skb)) { 1711 netdev_err(bp->dev, "BUG! 
empty skb!\n"); 1712 return 0; 1713 } 1714 1715 /* This is the last buffer of the frame: save socket buffer */ 1716 tx_skb->skb = skb; 1717 1718 /* Update TX ring: update buffer descriptors in reverse order 1719 * to avoid race condition 1720 */ 1721 1722 /* Set 'TX_USED' bit in buffer descriptor at tx_head position 1723 * to set the end of TX queue 1724 */ 1725 i = tx_head; 1726 entry = macb_tx_ring_wrap(bp, i); 1727 ctrl = MACB_BIT(TX_USED); 1728 desc = macb_tx_desc(queue, entry); 1729 desc->ctrl = ctrl; 1730 1731 if (lso_ctrl) { 1732 if (lso_ctrl == MACB_LSO_UFO_ENABLE) 1733 /* include header and FCS in value given to h/w */ 1734 mss_mfs = skb_shinfo(skb)->gso_size + 1735 skb_transport_offset(skb) + 1736 ETH_FCS_LEN; 1737 else /* TSO */ { 1738 mss_mfs = skb_shinfo(skb)->gso_size; 1739 /* TCP Sequence Number Source Select 1740 * can be set only for TSO 1741 */ 1742 seq_ctrl = 0; 1743 } 1744 } 1745 1746 do { 1747 i--; 1748 entry = macb_tx_ring_wrap(bp, i); 1749 tx_skb = &queue->tx_skb[entry]; 1750 desc = macb_tx_desc(queue, entry); 1751 1752 ctrl = (u32)tx_skb->size; 1753 if (eof) { 1754 ctrl |= MACB_BIT(TX_LAST); 1755 eof = 0; 1756 } 1757 if (unlikely(entry == (bp->tx_ring_size - 1))) 1758 ctrl |= MACB_BIT(TX_WRAP); 1759 1760 /* First descriptor is header descriptor */ 1761 if (i == queue->tx_head) { 1762 ctrl |= MACB_BF(TX_LSO, lso_ctrl); 1763 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl); 1764 if ((bp->dev->features & NETIF_F_HW_CSUM) && 1765 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl) 1766 ctrl |= MACB_BIT(TX_NOCRC); 1767 } else 1768 /* Only set MSS/MFS on payload descriptors 1769 * (second or later descriptor) 1770 */ 1771 ctrl |= MACB_BF(MSS_MFS, mss_mfs); 1772 1773 /* Set TX buffer descriptor */ 1774 macb_set_addr(bp, desc, tx_skb->mapping); 1775 /* desc->addr must be visible to hardware before clearing 1776 * 'TX_USED' bit in desc->ctrl. 1777 */ 1778 wmb(); 1779 desc->ctrl = ctrl; 1780 } while (i != queue->tx_head); 1781 1782 queue->tx_head = tx_head; 1783 1784 return count; 1785 1786 dma_error: 1787 netdev_err(bp->dev, "TX DMA map failed\n"); 1788 1789 for (i = queue->tx_head; i != tx_head; i++) { 1790 tx_skb = macb_tx_skb(queue, i); 1791 1792 macb_tx_unmap(bp, tx_skb); 1793 } 1794 1795 return 0; 1796 } 1797 1798 static netdev_features_t macb_features_check(struct sk_buff *skb, 1799 struct net_device *dev, 1800 netdev_features_t features) 1801 { 1802 unsigned int nr_frags, f; 1803 unsigned int hdrlen; 1804 1805 /* Validate LSO compatibility */ 1806 1807 /* there is only one buffer or protocol is not UDP */ 1808 if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP)) 1809 return features; 1810 1811 /* length of header */ 1812 hdrlen = skb_transport_offset(skb); 1813 1814 /* For UFO only: 1815 * When software supplies two or more payload buffers all payload buffers 1816 * apart from the last must be a multiple of 8 bytes in size. 
1817 */ 1818 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN)) 1819 return features & ~MACB_NETIF_LSO; 1820 1821 nr_frags = skb_shinfo(skb)->nr_frags; 1822 /* No need to check last fragment */ 1823 nr_frags--; 1824 for (f = 0; f < nr_frags; f++) { 1825 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 1826 1827 if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN)) 1828 return features & ~MACB_NETIF_LSO; 1829 } 1830 return features; 1831 } 1832 1833 static inline int macb_clear_csum(struct sk_buff *skb) 1834 { 1835 /* no change for packets without checksum offloading */ 1836 if (skb->ip_summed != CHECKSUM_PARTIAL) 1837 return 0; 1838 1839 /* make sure we can modify the header */ 1840 if (unlikely(skb_cow_head(skb, 0))) 1841 return -1; 1842 1843 /* initialize checksum field 1844 * This is required - at least for Zynq, which otherwise calculates 1845 * wrong UDP header checksums for UDP packets with UDP data len <=2 1846 */ 1847 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; 1848 return 0; 1849 } 1850 1851 static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) 1852 { 1853 bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb); 1854 int padlen = ETH_ZLEN - (*skb)->len; 1855 int headroom = skb_headroom(*skb); 1856 int tailroom = skb_tailroom(*skb); 1857 struct sk_buff *nskb; 1858 u32 fcs; 1859 1860 if (!(ndev->features & NETIF_F_HW_CSUM) || 1861 !((*skb)->ip_summed != CHECKSUM_PARTIAL) || 1862 skb_shinfo(*skb)->gso_size) /* Not available for GSO */ 1863 return 0; 1864 1865 if (padlen <= 0) { 1866 /* FCS could be appeded to tailroom. */ 1867 if (tailroom >= ETH_FCS_LEN) 1868 goto add_fcs; 1869 /* FCS could be appeded by moving data to headroom. */ 1870 else if (!cloned && headroom + tailroom >= ETH_FCS_LEN) 1871 padlen = 0; 1872 /* No room for FCS, need to reallocate skb. */ 1873 else 1874 padlen = ETH_FCS_LEN; 1875 } else { 1876 /* Add room for FCS. 
*/ 1877 padlen += ETH_FCS_LEN; 1878 } 1879 1880 if (!cloned && headroom + tailroom >= padlen) { 1881 (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); 1882 skb_set_tail_pointer(*skb, (*skb)->len); 1883 } else { 1884 nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); 1885 if (!nskb) 1886 return -ENOMEM; 1887 1888 dev_consume_skb_any(*skb); 1889 *skb = nskb; 1890 } 1891 1892 if (padlen > ETH_FCS_LEN) 1893 skb_put_zero(*skb, padlen - ETH_FCS_LEN); 1894 1895 add_fcs: 1896 /* set FCS to packet */ 1897 fcs = crc32_le(~0, (*skb)->data, (*skb)->len); 1898 fcs = ~fcs; 1899 1900 skb_put_u8(*skb, fcs & 0xff); 1901 skb_put_u8(*skb, (fcs >> 8) & 0xff); 1902 skb_put_u8(*skb, (fcs >> 16) & 0xff); 1903 skb_put_u8(*skb, (fcs >> 24) & 0xff); 1904 1905 return 0; 1906 } 1907 1908 static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) 1909 { 1910 u16 queue_index = skb_get_queue_mapping(skb); 1911 struct macb *bp = netdev_priv(dev); 1912 struct macb_queue *queue = &bp->queues[queue_index]; 1913 unsigned long flags; 1914 unsigned int desc_cnt, nr_frags, frag_size, f; 1915 unsigned int hdrlen; 1916 bool is_lso, is_udp = 0; 1917 netdev_tx_t ret = NETDEV_TX_OK; 1918 1919 if (macb_clear_csum(skb)) { 1920 dev_kfree_skb_any(skb); 1921 return ret; 1922 } 1923 1924 if (macb_pad_and_fcs(&skb, dev)) { 1925 dev_kfree_skb_any(skb); 1926 return ret; 1927 } 1928 1929 is_lso = (skb_shinfo(skb)->gso_size != 0); 1930 1931 if (is_lso) { 1932 is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP); 1933 1934 /* length of headers */ 1935 if (is_udp) 1936 /* only queue eth + ip headers separately for UDP */ 1937 hdrlen = skb_transport_offset(skb); 1938 else 1939 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); 1940 if (skb_headlen(skb) < hdrlen) { 1941 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); 1942 /* if this is required, would need to copy to single buffer */ 1943 return NETDEV_TX_BUSY; 1944 } 1945 } else 1946 hdrlen = min(skb_headlen(skb), bp->max_tx_length); 1947 1948 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 1949 netdev_vdbg(bp->dev, 1950 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", 1951 queue_index, skb->len, skb->head, skb->data, 1952 skb_tail_pointer(skb), skb_end_pointer(skb)); 1953 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, 1954 skb->data, 16, true); 1955 #endif 1956 1957 /* Count how many TX buffer descriptors are needed to send this 1958 * socket buffer: skb fragments of jumbo frames may need to be 1959 * split into many buffer descriptors. 1960 */ 1961 if (is_lso && (skb_headlen(skb) > hdrlen)) 1962 /* extra header descriptor if also payload in first buffer */ 1963 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; 1964 else 1965 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); 1966 nr_frags = skb_shinfo(skb)->nr_frags; 1967 for (f = 0; f < nr_frags; f++) { 1968 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); 1969 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); 1970 } 1971 1972 spin_lock_irqsave(&bp->lock, flags); 1973 1974 /* This is a hard error, log it. 
*/ 1975 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, 1976 bp->tx_ring_size) < desc_cnt) { 1977 netif_stop_subqueue(dev, queue_index); 1978 spin_unlock_irqrestore(&bp->lock, flags); 1979 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", 1980 queue->tx_head, queue->tx_tail); 1981 return NETDEV_TX_BUSY; 1982 } 1983 1984 /* Map socket buffer for DMA transfer */ 1985 if (!macb_tx_map(bp, queue, skb, hdrlen)) { 1986 dev_kfree_skb_any(skb); 1987 goto unlock; 1988 } 1989 1990 /* Make newly initialized descriptor visible to hardware */ 1991 wmb(); 1992 skb_tx_timestamp(skb); 1993 1994 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 1995 1996 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) 1997 netif_stop_subqueue(dev, queue_index); 1998 1999 unlock: 2000 spin_unlock_irqrestore(&bp->lock, flags); 2001 2002 return ret; 2003 } 2004 2005 static void macb_init_rx_buffer_size(struct macb *bp, size_t size) 2006 { 2007 if (!macb_is_gem(bp)) { 2008 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; 2009 } else { 2010 bp->rx_buffer_size = size; 2011 2012 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { 2013 netdev_dbg(bp->dev, 2014 "RX buffer must be multiple of %d bytes, expanding\n", 2015 RX_BUFFER_MULTIPLE); 2016 bp->rx_buffer_size = 2017 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); 2018 } 2019 } 2020 2021 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", 2022 bp->dev->mtu, bp->rx_buffer_size); 2023 } 2024 2025 static void gem_free_rx_buffers(struct macb *bp) 2026 { 2027 struct sk_buff *skb; 2028 struct macb_dma_desc *desc; 2029 struct macb_queue *queue; 2030 dma_addr_t addr; 2031 unsigned int q; 2032 int i; 2033 2034 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2035 if (!queue->rx_skbuff) 2036 continue; 2037 2038 for (i = 0; i < bp->rx_ring_size; i++) { 2039 skb = queue->rx_skbuff[i]; 2040 2041 if (!skb) 2042 continue; 2043 2044 desc = macb_rx_desc(queue, i); 2045 addr = macb_get_addr(bp, desc); 2046 2047 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, 2048 DMA_FROM_DEVICE); 2049 dev_kfree_skb_any(skb); 2050 skb = NULL; 2051 } 2052 2053 kfree(queue->rx_skbuff); 2054 queue->rx_skbuff = NULL; 2055 } 2056 } 2057 2058 static void macb_free_rx_buffers(struct macb *bp) 2059 { 2060 struct macb_queue *queue = &bp->queues[0]; 2061 2062 if (queue->rx_buffers) { 2063 dma_free_coherent(&bp->pdev->dev, 2064 bp->rx_ring_size * bp->rx_buffer_size, 2065 queue->rx_buffers, queue->rx_buffers_dma); 2066 queue->rx_buffers = NULL; 2067 } 2068 } 2069 2070 static void macb_free_consistent(struct macb *bp) 2071 { 2072 struct macb_queue *queue; 2073 unsigned int q; 2074 int size; 2075 2076 bp->macbgem_ops.mog_free_rx_buffers(bp); 2077 2078 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2079 kfree(queue->tx_skb); 2080 queue->tx_skb = NULL; 2081 if (queue->tx_ring) { 2082 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; 2083 dma_free_coherent(&bp->pdev->dev, size, 2084 queue->tx_ring, queue->tx_ring_dma); 2085 queue->tx_ring = NULL; 2086 } 2087 if (queue->rx_ring) { 2088 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; 2089 dma_free_coherent(&bp->pdev->dev, size, 2090 queue->rx_ring, queue->rx_ring_dma); 2091 queue->rx_ring = NULL; 2092 } 2093 } 2094 } 2095 2096 static int gem_alloc_rx_buffers(struct macb *bp) 2097 { 2098 struct macb_queue *queue; 2099 unsigned int q; 2100 int size; 2101 2102 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2103 size = bp->rx_ring_size * sizeof(struct sk_buff *); 2104 queue->rx_skbuff 
= kzalloc(size, GFP_KERNEL); 2105 if (!queue->rx_skbuff) 2106 return -ENOMEM; 2107 else 2108 netdev_dbg(bp->dev, 2109 "Allocated %d RX struct sk_buff entries at %p\n", 2110 bp->rx_ring_size, queue->rx_skbuff); 2111 } 2112 return 0; 2113 } 2114 2115 static int macb_alloc_rx_buffers(struct macb *bp) 2116 { 2117 struct macb_queue *queue = &bp->queues[0]; 2118 int size; 2119 2120 size = bp->rx_ring_size * bp->rx_buffer_size; 2121 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, 2122 &queue->rx_buffers_dma, GFP_KERNEL); 2123 if (!queue->rx_buffers) 2124 return -ENOMEM; 2125 2126 netdev_dbg(bp->dev, 2127 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", 2128 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers); 2129 return 0; 2130 } 2131 2132 static int macb_alloc_consistent(struct macb *bp) 2133 { 2134 struct macb_queue *queue; 2135 unsigned int q; 2136 int size; 2137 2138 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2139 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; 2140 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 2141 &queue->tx_ring_dma, 2142 GFP_KERNEL); 2143 if (!queue->tx_ring) 2144 goto out_err; 2145 netdev_dbg(bp->dev, 2146 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n", 2147 q, size, (unsigned long)queue->tx_ring_dma, 2148 queue->tx_ring); 2149 2150 size = bp->tx_ring_size * sizeof(struct macb_tx_skb); 2151 queue->tx_skb = kmalloc(size, GFP_KERNEL); 2152 if (!queue->tx_skb) 2153 goto out_err; 2154 2155 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; 2156 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 2157 &queue->rx_ring_dma, GFP_KERNEL); 2158 if (!queue->rx_ring) 2159 goto out_err; 2160 netdev_dbg(bp->dev, 2161 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", 2162 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring); 2163 } 2164 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) 2165 goto out_err; 2166 2167 return 0; 2168 2169 out_err: 2170 macb_free_consistent(bp); 2171 return -ENOMEM; 2172 } 2173 2174 static void gem_init_rings(struct macb *bp) 2175 { 2176 struct macb_queue *queue; 2177 struct macb_dma_desc *desc = NULL; 2178 unsigned int q; 2179 int i; 2180 2181 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2182 for (i = 0; i < bp->tx_ring_size; i++) { 2183 desc = macb_tx_desc(queue, i); 2184 macb_set_addr(bp, desc, 0); 2185 desc->ctrl = MACB_BIT(TX_USED); 2186 } 2187 desc->ctrl |= MACB_BIT(TX_WRAP); 2188 queue->tx_head = 0; 2189 queue->tx_tail = 0; 2190 2191 queue->rx_tail = 0; 2192 queue->rx_prepared_head = 0; 2193 2194 gem_rx_refill(queue); 2195 } 2196 2197 } 2198 2199 static void macb_init_rings(struct macb *bp) 2200 { 2201 int i; 2202 struct macb_dma_desc *desc = NULL; 2203 2204 macb_init_rx_ring(&bp->queues[0]); 2205 2206 for (i = 0; i < bp->tx_ring_size; i++) { 2207 desc = macb_tx_desc(&bp->queues[0], i); 2208 macb_set_addr(bp, desc, 0); 2209 desc->ctrl = MACB_BIT(TX_USED); 2210 } 2211 bp->queues[0].tx_head = 0; 2212 bp->queues[0].tx_tail = 0; 2213 desc->ctrl |= MACB_BIT(TX_WRAP); 2214 } 2215 2216 static void macb_reset_hw(struct macb *bp) 2217 { 2218 struct macb_queue *queue; 2219 unsigned int q; 2220 u32 ctrl = macb_readl(bp, NCR); 2221 2222 /* Disable RX and TX (XXX: Should we halt the transmission 2223 * more gracefully?) 2224 */ 2225 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); 2226 2227 /* Clear the stats registers (XXX: Update stats first?) 
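 * Note: writing CLRSTAT zeroes the hardware statistics counters, so any
 * counts accumulated since the last gem_update_stats()/macb_update_stats()
 * call are discarded without being folded into the netdev stats.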
*/ 2228 ctrl |= MACB_BIT(CLRSTAT); 2229 2230 macb_writel(bp, NCR, ctrl); 2231 2232 /* Clear all status flags */ 2233 macb_writel(bp, TSR, -1); 2234 macb_writel(bp, RSR, -1); 2235 2236 /* Disable all interrupts */ 2237 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2238 queue_writel(queue, IDR, -1); 2239 queue_readl(queue, ISR); 2240 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 2241 queue_writel(queue, ISR, -1); 2242 } 2243 } 2244 2245 static u32 gem_mdc_clk_div(struct macb *bp) 2246 { 2247 u32 config; 2248 unsigned long pclk_hz = clk_get_rate(bp->pclk); 2249 2250 if (pclk_hz <= 20000000) 2251 config = GEM_BF(CLK, GEM_CLK_DIV8); 2252 else if (pclk_hz <= 40000000) 2253 config = GEM_BF(CLK, GEM_CLK_DIV16); 2254 else if (pclk_hz <= 80000000) 2255 config = GEM_BF(CLK, GEM_CLK_DIV32); 2256 else if (pclk_hz <= 120000000) 2257 config = GEM_BF(CLK, GEM_CLK_DIV48); 2258 else if (pclk_hz <= 160000000) 2259 config = GEM_BF(CLK, GEM_CLK_DIV64); 2260 else 2261 config = GEM_BF(CLK, GEM_CLK_DIV96); 2262 2263 return config; 2264 } 2265 2266 static u32 macb_mdc_clk_div(struct macb *bp) 2267 { 2268 u32 config; 2269 unsigned long pclk_hz; 2270 2271 if (macb_is_gem(bp)) 2272 return gem_mdc_clk_div(bp); 2273 2274 pclk_hz = clk_get_rate(bp->pclk); 2275 if (pclk_hz <= 20000000) 2276 config = MACB_BF(CLK, MACB_CLK_DIV8); 2277 else if (pclk_hz <= 40000000) 2278 config = MACB_BF(CLK, MACB_CLK_DIV16); 2279 else if (pclk_hz <= 80000000) 2280 config = MACB_BF(CLK, MACB_CLK_DIV32); 2281 else 2282 config = MACB_BF(CLK, MACB_CLK_DIV64); 2283 2284 return config; 2285 } 2286 2287 /* Get the DMA bus width field of the network configuration register that we 2288 * should program. We find the width from decoding the design configuration 2289 * register to find the maximum supported data bus width. 2290 */ 2291 static u32 macb_dbw(struct macb *bp) 2292 { 2293 if (!macb_is_gem(bp)) 2294 return 0; 2295 2296 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { 2297 case 4: 2298 return GEM_BF(DBW, GEM_DBW128); 2299 case 2: 2300 return GEM_BF(DBW, GEM_DBW64); 2301 case 1: 2302 default: 2303 return GEM_BF(DBW, GEM_DBW32); 2304 } 2305 } 2306 2307 /* Configure the receive DMA engine 2308 * - use the correct receive buffer size 2309 * - set best burst length for DMA operations 2310 * (if not supported by FIFO, it will fallback to default) 2311 * - set both rx/tx packet buffers to full memory size 2312 * These are configurable parameters for GEM. 
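 * Note: the receive buffer size is programmed in units of
 * RX_BUFFER_MULTIPLE (64) bytes; queue 0 uses the RXBS field of DMACFG,
 * the other queues their per-queue RBQS register.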
2313 */ 2314 static void macb_configure_dma(struct macb *bp) 2315 { 2316 struct macb_queue *queue; 2317 u32 buffer_size; 2318 unsigned int q; 2319 u32 dmacfg; 2320 2321 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; 2322 if (macb_is_gem(bp)) { 2323 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); 2324 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2325 if (q) 2326 queue_writel(queue, RBQS, buffer_size); 2327 else 2328 dmacfg |= GEM_BF(RXBS, buffer_size); 2329 } 2330 if (bp->dma_burst_length) 2331 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); 2332 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 2333 dmacfg &= ~GEM_BIT(ENDIA_PKT); 2334 2335 if (bp->native_io) 2336 dmacfg &= ~GEM_BIT(ENDIA_DESC); 2337 else 2338 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ 2339 2340 if (bp->dev->features & NETIF_F_HW_CSUM) 2341 dmacfg |= GEM_BIT(TXCOEN); 2342 else 2343 dmacfg &= ~GEM_BIT(TXCOEN); 2344 2345 dmacfg &= ~GEM_BIT(ADDR64); 2346 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2347 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2348 dmacfg |= GEM_BIT(ADDR64); 2349 #endif 2350 #ifdef CONFIG_MACB_USE_HWSTAMP 2351 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) 2352 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT); 2353 #endif 2354 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", 2355 dmacfg); 2356 gem_writel(bp, DMACFG, dmacfg); 2357 } 2358 } 2359 2360 static void macb_init_hw(struct macb *bp) 2361 { 2362 u32 config; 2363 2364 macb_reset_hw(bp); 2365 macb_set_hwaddr(bp); 2366 2367 config = macb_mdc_clk_div(bp); 2368 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ 2369 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ 2370 if (bp->caps & MACB_CAPS_JUMBO) 2371 config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ 2372 else 2373 config |= MACB_BIT(BIG); /* Receive oversized frames */ 2374 if (bp->dev->flags & IFF_PROMISC) 2375 config |= MACB_BIT(CAF); /* Copy All Frames */ 2376 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) 2377 config |= GEM_BIT(RXCOEN); 2378 if (!(bp->dev->flags & IFF_BROADCAST)) 2379 config |= MACB_BIT(NBC); /* No BroadCast */ 2380 config |= macb_dbw(bp); 2381 macb_writel(bp, NCFGR, config); 2382 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) 2383 gem_writel(bp, JML, bp->jumbo_max_len); 2384 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; 2385 if (bp->caps & MACB_CAPS_JUMBO) 2386 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; 2387 2388 macb_configure_dma(bp); 2389 } 2390 2391 /* The hash address register is 64 bits long and takes up two 2392 * locations in the memory map. The least significant bits are stored 2393 * in EMAC_HSL and the most significant bits in EMAC_HSH. 2394 * 2395 * The unicast hash enable and the multicast hash enable bits in the 2396 * network configuration register enable the reception of hash matched 2397 * frames. The destination address is reduced to a 6 bit index into 2398 * the 64 bit hash register using the following hash function. The 2399 * hash function is an exclusive or of every sixth bit of the 2400 * destination address. 
2401 * 2402 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] 2403 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] 2404 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] 2405 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] 2406 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] 2407 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] 2408 * 2409 * da[0] represents the least significant bit of the first byte 2410 * received, that is, the multicast/unicast indicator, and da[47] 2411 * represents the most significant bit of the last byte received. If 2412 * the hash index, hi[n], points to a bit that is set in the hash 2413 * register then the frame will be matched according to whether the 2414 * frame is multicast or unicast. A multicast match will be signalled 2415 * if the multicast hash enable bit is set, da[0] is 1 and the hash 2416 * index points to a bit set in the hash register. A unicast match 2417 * will be signalled if the unicast hash enable bit is set, da[0] is 0 2418 * and the hash index points to a bit set in the hash register. To 2419 * receive all multicast frames, the hash register should be set with 2420 * all ones and the multicast hash enable bit should be set in the 2421 * network configuration register. 2422 */ 2423 2424 static inline int hash_bit_value(int bitnr, __u8 *addr) 2425 { 2426 if (addr[bitnr / 8] & (1 << (bitnr % 8))) 2427 return 1; 2428 return 0; 2429 } 2430 2431 /* Return the hash index value for the specified address. */ 2432 static int hash_get_index(__u8 *addr) 2433 { 2434 int i, j, bitval; 2435 int hash_index = 0; 2436 2437 for (j = 0; j < 6; j++) { 2438 for (i = 0, bitval = 0; i < 8; i++) 2439 bitval ^= hash_bit_value(i * 6 + j, addr); 2440 2441 hash_index |= (bitval << j); 2442 } 2443 2444 return hash_index; 2445 } 2446 2447 /* Add multicast addresses to the internal multicast-hash table. */ 2448 static void macb_sethashtable(struct net_device *dev) 2449 { 2450 struct netdev_hw_addr *ha; 2451 unsigned long mc_filter[2]; 2452 unsigned int bitnr; 2453 struct macb *bp = netdev_priv(dev); 2454 2455 mc_filter[0] = 0; 2456 mc_filter[1] = 0; 2457 2458 netdev_for_each_mc_addr(ha, dev) { 2459 bitnr = hash_get_index(ha->addr); 2460 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 2461 } 2462 2463 macb_or_gem_writel(bp, HRB, mc_filter[0]); 2464 macb_or_gem_writel(bp, HRT, mc_filter[1]); 2465 } 2466 2467 /* Enable/Disable promiscuous and multicast modes. 
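 * On GEM, enabling promiscuous mode also disables RX checksum offload so
 * that frames with bad checksums are still delivered instead of being
 * discarded by the controller.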
*/ 2468 static void macb_set_rx_mode(struct net_device *dev) 2469 { 2470 unsigned long cfg; 2471 struct macb *bp = netdev_priv(dev); 2472 2473 cfg = macb_readl(bp, NCFGR); 2474 2475 if (dev->flags & IFF_PROMISC) { 2476 /* Enable promiscuous mode */ 2477 cfg |= MACB_BIT(CAF); 2478 2479 /* Disable RX checksum offload */ 2480 if (macb_is_gem(bp)) 2481 cfg &= ~GEM_BIT(RXCOEN); 2482 } else { 2483 /* Disable promiscuous mode */ 2484 cfg &= ~MACB_BIT(CAF); 2485 2486 /* Enable RX checksum offload only if requested */ 2487 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) 2488 cfg |= GEM_BIT(RXCOEN); 2489 } 2490 2491 if (dev->flags & IFF_ALLMULTI) { 2492 /* Enable all multicast mode */ 2493 macb_or_gem_writel(bp, HRB, -1); 2494 macb_or_gem_writel(bp, HRT, -1); 2495 cfg |= MACB_BIT(NCFGR_MTI); 2496 } else if (!netdev_mc_empty(dev)) { 2497 /* Enable specific multicasts */ 2498 macb_sethashtable(dev); 2499 cfg |= MACB_BIT(NCFGR_MTI); 2500 } else if (dev->flags & (~IFF_ALLMULTI)) { 2501 /* Disable all multicast mode */ 2502 macb_or_gem_writel(bp, HRB, 0); 2503 macb_or_gem_writel(bp, HRT, 0); 2504 cfg &= ~MACB_BIT(NCFGR_MTI); 2505 } 2506 2507 macb_writel(bp, NCFGR, cfg); 2508 } 2509 2510 static int macb_open(struct net_device *dev) 2511 { 2512 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; 2513 struct macb *bp = netdev_priv(dev); 2514 struct macb_queue *queue; 2515 unsigned int q; 2516 int err; 2517 2518 netdev_dbg(bp->dev, "open\n"); 2519 2520 err = pm_runtime_get_sync(&bp->pdev->dev); 2521 if (err < 0) 2522 goto pm_exit; 2523 2524 /* RX buffers initialization */ 2525 macb_init_rx_buffer_size(bp, bufsz); 2526 2527 err = macb_alloc_consistent(bp); 2528 if (err) { 2529 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", 2530 err); 2531 goto pm_exit; 2532 } 2533 2534 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2535 napi_enable(&queue->napi); 2536 2537 macb_init_hw(bp); 2538 2539 err = macb_phylink_connect(bp); 2540 if (err) 2541 goto pm_exit; 2542 2543 netif_tx_start_all_queues(dev); 2544 2545 if (bp->ptp_info) 2546 bp->ptp_info->ptp_init(dev); 2547 2548 pm_exit: 2549 if (err) { 2550 pm_runtime_put_sync(&bp->pdev->dev); 2551 return err; 2552 } 2553 return 0; 2554 } 2555 2556 static int macb_close(struct net_device *dev) 2557 { 2558 struct macb *bp = netdev_priv(dev); 2559 struct macb_queue *queue; 2560 unsigned long flags; 2561 unsigned int q; 2562 2563 netif_tx_stop_all_queues(dev); 2564 2565 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2566 napi_disable(&queue->napi); 2567 2568 phylink_stop(bp->phylink); 2569 phylink_disconnect_phy(bp->phylink); 2570 2571 spin_lock_irqsave(&bp->lock, flags); 2572 macb_reset_hw(bp); 2573 netif_carrier_off(dev); 2574 spin_unlock_irqrestore(&bp->lock, flags); 2575 2576 macb_free_consistent(bp); 2577 2578 if (bp->ptp_info) 2579 bp->ptp_info->ptp_remove(dev); 2580 2581 pm_runtime_put(&bp->pdev->dev); 2582 2583 return 0; 2584 } 2585 2586 static int macb_change_mtu(struct net_device *dev, int new_mtu) 2587 { 2588 if (netif_running(dev)) 2589 return -EBUSY; 2590 2591 dev->mtu = new_mtu; 2592 2593 return 0; 2594 } 2595 2596 static void gem_update_stats(struct macb *bp) 2597 { 2598 struct macb_queue *queue; 2599 unsigned int i, q, idx; 2600 unsigned long *stat; 2601 2602 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 2603 2604 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { 2605 u32 offset = gem_statistics[i].offset; 2606 u64 val = bp->macb_reg_readl(bp, offset); 2607 2608 bp->ethtool_stats[i] += val; 2609 *p += 
val; 2610 2611 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { 2612 /* Add GEM_OCTTXH, GEM_OCTRXH */ 2613 val = bp->macb_reg_readl(bp, offset + 4); 2614 bp->ethtool_stats[i] += ((u64)val) << 32; 2615 *(++p) += val; 2616 } 2617 } 2618 2619 idx = GEM_STATS_LEN; 2620 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2621 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat) 2622 bp->ethtool_stats[idx++] = *stat; 2623 } 2624 2625 static struct net_device_stats *gem_get_stats(struct macb *bp) 2626 { 2627 struct gem_stats *hwstat = &bp->hw_stats.gem; 2628 struct net_device_stats *nstat = &bp->dev->stats; 2629 2630 gem_update_stats(bp); 2631 2632 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + 2633 hwstat->rx_alignment_errors + 2634 hwstat->rx_resource_errors + 2635 hwstat->rx_overruns + 2636 hwstat->rx_oversize_frames + 2637 hwstat->rx_jabbers + 2638 hwstat->rx_undersized_frames + 2639 hwstat->rx_length_field_frame_errors); 2640 nstat->tx_errors = (hwstat->tx_late_collisions + 2641 hwstat->tx_excessive_collisions + 2642 hwstat->tx_underrun + 2643 hwstat->tx_carrier_sense_errors); 2644 nstat->multicast = hwstat->rx_multicast_frames; 2645 nstat->collisions = (hwstat->tx_single_collision_frames + 2646 hwstat->tx_multiple_collision_frames + 2647 hwstat->tx_excessive_collisions); 2648 nstat->rx_length_errors = (hwstat->rx_oversize_frames + 2649 hwstat->rx_jabbers + 2650 hwstat->rx_undersized_frames + 2651 hwstat->rx_length_field_frame_errors); 2652 nstat->rx_over_errors = hwstat->rx_resource_errors; 2653 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; 2654 nstat->rx_frame_errors = hwstat->rx_alignment_errors; 2655 nstat->rx_fifo_errors = hwstat->rx_overruns; 2656 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; 2657 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; 2658 nstat->tx_fifo_errors = hwstat->tx_underrun; 2659 2660 return nstat; 2661 } 2662 2663 static void gem_get_ethtool_stats(struct net_device *dev, 2664 struct ethtool_stats *stats, u64 *data) 2665 { 2666 struct macb *bp; 2667 2668 bp = netdev_priv(dev); 2669 gem_update_stats(bp); 2670 memcpy(data, &bp->ethtool_stats, sizeof(u64) 2671 * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES)); 2672 } 2673 2674 static int gem_get_sset_count(struct net_device *dev, int sset) 2675 { 2676 struct macb *bp = netdev_priv(dev); 2677 2678 switch (sset) { 2679 case ETH_SS_STATS: 2680 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; 2681 default: 2682 return -EOPNOTSUPP; 2683 } 2684 } 2685 2686 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) 2687 { 2688 char stat_string[ETH_GSTRING_LEN]; 2689 struct macb *bp = netdev_priv(dev); 2690 struct macb_queue *queue; 2691 unsigned int i; 2692 unsigned int q; 2693 2694 switch (sset) { 2695 case ETH_SS_STATS: 2696 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN) 2697 memcpy(p, gem_statistics[i].stat_string, 2698 ETH_GSTRING_LEN); 2699 2700 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2701 for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) { 2702 snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s", 2703 q, queue_statistics[i].stat_string); 2704 memcpy(p, stat_string, ETH_GSTRING_LEN); 2705 } 2706 } 2707 break; 2708 } 2709 } 2710 2711 static struct net_device_stats *macb_get_stats(struct net_device *dev) 2712 { 2713 struct macb *bp = netdev_priv(dev); 2714 struct net_device_stats *nstat = &bp->dev->stats; 2715 struct macb_stats *hwstat = 
&bp->hw_stats.macb; 2716 2717 if (macb_is_gem(bp)) 2718 return gem_get_stats(bp); 2719 2720 /* read stats from hardware */ 2721 macb_update_stats(bp); 2722 2723 /* Convert HW stats into netdevice stats */ 2724 nstat->rx_errors = (hwstat->rx_fcs_errors + 2725 hwstat->rx_align_errors + 2726 hwstat->rx_resource_errors + 2727 hwstat->rx_overruns + 2728 hwstat->rx_oversize_pkts + 2729 hwstat->rx_jabbers + 2730 hwstat->rx_undersize_pkts + 2731 hwstat->rx_length_mismatch); 2732 nstat->tx_errors = (hwstat->tx_late_cols + 2733 hwstat->tx_excessive_cols + 2734 hwstat->tx_underruns + 2735 hwstat->tx_carrier_errors + 2736 hwstat->sqe_test_errors); 2737 nstat->collisions = (hwstat->tx_single_cols + 2738 hwstat->tx_multiple_cols + 2739 hwstat->tx_excessive_cols); 2740 nstat->rx_length_errors = (hwstat->rx_oversize_pkts + 2741 hwstat->rx_jabbers + 2742 hwstat->rx_undersize_pkts + 2743 hwstat->rx_length_mismatch); 2744 nstat->rx_over_errors = hwstat->rx_resource_errors + 2745 hwstat->rx_overruns; 2746 nstat->rx_crc_errors = hwstat->rx_fcs_errors; 2747 nstat->rx_frame_errors = hwstat->rx_align_errors; 2748 nstat->rx_fifo_errors = hwstat->rx_overruns; 2749 /* XXX: What does "missed" mean? */ 2750 nstat->tx_aborted_errors = hwstat->tx_excessive_cols; 2751 nstat->tx_carrier_errors = hwstat->tx_carrier_errors; 2752 nstat->tx_fifo_errors = hwstat->tx_underruns; 2753 /* Don't know about heartbeat or window errors... */ 2754 2755 return nstat; 2756 } 2757 2758 static int macb_get_regs_len(struct net_device *netdev) 2759 { 2760 return MACB_GREGS_NBR * sizeof(u32); 2761 } 2762 2763 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, 2764 void *p) 2765 { 2766 struct macb *bp = netdev_priv(dev); 2767 unsigned int tail, head; 2768 u32 *regs_buff = p; 2769 2770 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) 2771 | MACB_GREGS_VERSION; 2772 2773 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); 2774 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); 2775 2776 regs_buff[0] = macb_readl(bp, NCR); 2777 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); 2778 regs_buff[2] = macb_readl(bp, NSR); 2779 regs_buff[3] = macb_readl(bp, TSR); 2780 regs_buff[4] = macb_readl(bp, RBQP); 2781 regs_buff[5] = macb_readl(bp, TBQP); 2782 regs_buff[6] = macb_readl(bp, RSR); 2783 regs_buff[7] = macb_readl(bp, IMR); 2784 2785 regs_buff[8] = tail; 2786 regs_buff[9] = head; 2787 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); 2788 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); 2789 2790 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) 2791 regs_buff[12] = macb_or_gem_readl(bp, USRIO); 2792 if (macb_is_gem(bp)) 2793 regs_buff[13] = gem_readl(bp, DMACFG); 2794 } 2795 2796 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2797 { 2798 struct macb *bp = netdev_priv(netdev); 2799 2800 wol->supported = 0; 2801 wol->wolopts = 0; 2802 2803 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) 2804 phylink_ethtool_get_wol(bp->phylink, wol); 2805 } 2806 2807 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2808 { 2809 struct macb *bp = netdev_priv(netdev); 2810 int ret; 2811 2812 ret = phylink_ethtool_set_wol(bp->phylink, wol); 2813 if (!ret) 2814 return 0; 2815 2816 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || 2817 (wol->wolopts & ~WAKE_MAGIC)) 2818 return -EOPNOTSUPP; 2819 2820 if (wol->wolopts & WAKE_MAGIC) 2821 bp->wol |= MACB_WOL_ENABLED; 2822 else 2823 bp->wol &= ~MACB_WOL_ENABLED; 2824 2825 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & 
MACB_WOL_ENABLED); 2826 2827 return 0; 2828 } 2829 2830 static int macb_get_link_ksettings(struct net_device *netdev, 2831 struct ethtool_link_ksettings *kset) 2832 { 2833 struct macb *bp = netdev_priv(netdev); 2834 2835 return phylink_ethtool_ksettings_get(bp->phylink, kset); 2836 } 2837 2838 static int macb_set_link_ksettings(struct net_device *netdev, 2839 const struct ethtool_link_ksettings *kset) 2840 { 2841 struct macb *bp = netdev_priv(netdev); 2842 2843 return phylink_ethtool_ksettings_set(bp->phylink, kset); 2844 } 2845 2846 static void macb_get_ringparam(struct net_device *netdev, 2847 struct ethtool_ringparam *ring) 2848 { 2849 struct macb *bp = netdev_priv(netdev); 2850 2851 ring->rx_max_pending = MAX_RX_RING_SIZE; 2852 ring->tx_max_pending = MAX_TX_RING_SIZE; 2853 2854 ring->rx_pending = bp->rx_ring_size; 2855 ring->tx_pending = bp->tx_ring_size; 2856 } 2857 2858 static int macb_set_ringparam(struct net_device *netdev, 2859 struct ethtool_ringparam *ring) 2860 { 2861 struct macb *bp = netdev_priv(netdev); 2862 u32 new_rx_size, new_tx_size; 2863 unsigned int reset = 0; 2864 2865 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 2866 return -EINVAL; 2867 2868 new_rx_size = clamp_t(u32, ring->rx_pending, 2869 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE); 2870 new_rx_size = roundup_pow_of_two(new_rx_size); 2871 2872 new_tx_size = clamp_t(u32, ring->tx_pending, 2873 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE); 2874 new_tx_size = roundup_pow_of_two(new_tx_size); 2875 2876 if ((new_tx_size == bp->tx_ring_size) && 2877 (new_rx_size == bp->rx_ring_size)) { 2878 /* nothing to do */ 2879 return 0; 2880 } 2881 2882 if (netif_running(bp->dev)) { 2883 reset = 1; 2884 macb_close(bp->dev); 2885 } 2886 2887 bp->rx_ring_size = new_rx_size; 2888 bp->tx_ring_size = new_tx_size; 2889 2890 if (reset) 2891 macb_open(bp->dev); 2892 2893 return 0; 2894 } 2895 2896 #ifdef CONFIG_MACB_USE_HWSTAMP 2897 static unsigned int gem_get_tsu_rate(struct macb *bp) 2898 { 2899 struct clk *tsu_clk; 2900 unsigned int tsu_rate; 2901 2902 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); 2903 if (!IS_ERR(tsu_clk)) 2904 tsu_rate = clk_get_rate(tsu_clk); 2905 /* try pclk instead */ 2906 else if (!IS_ERR(bp->pclk)) { 2907 tsu_clk = bp->pclk; 2908 tsu_rate = clk_get_rate(tsu_clk); 2909 } else 2910 return -ENOTSUPP; 2911 return tsu_rate; 2912 } 2913 2914 static s32 gem_get_ptp_max_adj(void) 2915 { 2916 return 64000000; 2917 } 2918 2919 static int gem_get_ts_info(struct net_device *dev, 2920 struct ethtool_ts_info *info) 2921 { 2922 struct macb *bp = netdev_priv(dev); 2923 2924 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { 2925 ethtool_op_get_ts_info(dev, info); 2926 return 0; 2927 } 2928 2929 info->so_timestamping = 2930 SOF_TIMESTAMPING_TX_SOFTWARE | 2931 SOF_TIMESTAMPING_RX_SOFTWARE | 2932 SOF_TIMESTAMPING_SOFTWARE | 2933 SOF_TIMESTAMPING_TX_HARDWARE | 2934 SOF_TIMESTAMPING_RX_HARDWARE | 2935 SOF_TIMESTAMPING_RAW_HARDWARE; 2936 info->tx_types = 2937 (1 << HWTSTAMP_TX_ONESTEP_SYNC) | 2938 (1 << HWTSTAMP_TX_OFF) | 2939 (1 << HWTSTAMP_TX_ON); 2940 info->rx_filters = 2941 (1 << HWTSTAMP_FILTER_NONE) | 2942 (1 << HWTSTAMP_FILTER_ALL); 2943 2944 info->phc_index = bp->ptp_clock ? 
ptp_clock_index(bp->ptp_clock) : -1; 2945 2946 return 0; 2947 } 2948 2949 static struct macb_ptp_info gem_ptp_info = { 2950 .ptp_init = gem_ptp_init, 2951 .ptp_remove = gem_ptp_remove, 2952 .get_ptp_max_adj = gem_get_ptp_max_adj, 2953 .get_tsu_rate = gem_get_tsu_rate, 2954 .get_ts_info = gem_get_ts_info, 2955 .get_hwtst = gem_get_hwtst, 2956 .set_hwtst = gem_set_hwtst, 2957 }; 2958 #endif 2959 2960 static int macb_get_ts_info(struct net_device *netdev, 2961 struct ethtool_ts_info *info) 2962 { 2963 struct macb *bp = netdev_priv(netdev); 2964 2965 if (bp->ptp_info) 2966 return bp->ptp_info->get_ts_info(netdev, info); 2967 2968 return ethtool_op_get_ts_info(netdev, info); 2969 } 2970 2971 static void gem_enable_flow_filters(struct macb *bp, bool enable) 2972 { 2973 struct net_device *netdev = bp->dev; 2974 struct ethtool_rx_fs_item *item; 2975 u32 t2_scr; 2976 int num_t2_scr; 2977 2978 if (!(netdev->features & NETIF_F_NTUPLE)) 2979 return; 2980 2981 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)); 2982 2983 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 2984 struct ethtool_rx_flow_spec *fs = &item->fs; 2985 struct ethtool_tcpip4_spec *tp4sp_m; 2986 2987 if (fs->location >= num_t2_scr) 2988 continue; 2989 2990 t2_scr = gem_readl_n(bp, SCRT2, fs->location); 2991 2992 /* enable/disable screener regs for the flow entry */ 2993 t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr); 2994 2995 /* only enable fields with no masking */ 2996 tp4sp_m = &(fs->m_u.tcp_ip4_spec); 2997 2998 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF)) 2999 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr); 3000 else 3001 t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr); 3002 3003 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF)) 3004 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr); 3005 else 3006 t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr); 3007 3008 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF))) 3009 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr); 3010 else 3011 t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr); 3012 3013 gem_writel_n(bp, SCRT2, fs->location, t2_scr); 3014 } 3015 } 3016 3017 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) 3018 { 3019 struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m; 3020 uint16_t index = fs->location; 3021 u32 w0, w1, t2_scr; 3022 bool cmp_a = false; 3023 bool cmp_b = false; 3024 bool cmp_c = false; 3025 3026 tp4sp_v = &(fs->h_u.tcp_ip4_spec); 3027 tp4sp_m = &(fs->m_u.tcp_ip4_spec); 3028 3029 /* ignore field if any masking set */ 3030 if (tp4sp_m->ip4src == 0xFFFFFFFF) { 3031 /* 1st compare reg - IP source address */ 3032 w0 = 0; 3033 w1 = 0; 3034 w0 = tp4sp_v->ip4src; 3035 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 3036 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); 3037 w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1); 3038 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0); 3039 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1); 3040 cmp_a = true; 3041 } 3042 3043 /* ignore field if any masking set */ 3044 if (tp4sp_m->ip4dst == 0xFFFFFFFF) { 3045 /* 2nd compare reg - IP destination address */ 3046 w0 = 0; 3047 w1 = 0; 3048 w0 = tp4sp_v->ip4dst; 3049 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 3050 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); 3051 w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1); 3052 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0); 3053 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1); 3054 cmp_b = true; 3055 } 3056 3057 /* ignore both port fields if masking set in both */ 3058 if 
((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) { 3059 /* 3rd compare reg - source port, destination port */ 3060 w0 = 0; 3061 w1 = 0; 3062 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1); 3063 if (tp4sp_m->psrc == tp4sp_m->pdst) { 3064 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0); 3065 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); 3066 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 3067 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); 3068 } else { 3069 /* only one port definition */ 3070 w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */ 3071 w0 = GEM_BFINS(T2MASK, 0xFFFF, w0); 3072 if (tp4sp_m->psrc == 0xFFFF) { /* src port */ 3073 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0); 3074 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); 3075 } else { /* dst port */ 3076 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); 3077 w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1); 3078 } 3079 } 3080 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0); 3081 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1); 3082 cmp_c = true; 3083 } 3084 3085 t2_scr = 0; 3086 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr); 3087 t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr); 3088 if (cmp_a) 3089 t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr); 3090 if (cmp_b) 3091 t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr); 3092 if (cmp_c) 3093 t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr); 3094 gem_writel_n(bp, SCRT2, index, t2_scr); 3095 } 3096 3097 static int gem_add_flow_filter(struct net_device *netdev, 3098 struct ethtool_rxnfc *cmd) 3099 { 3100 struct macb *bp = netdev_priv(netdev); 3101 struct ethtool_rx_flow_spec *fs = &cmd->fs; 3102 struct ethtool_rx_fs_item *item, *newfs; 3103 unsigned long flags; 3104 int ret = -EINVAL; 3105 bool added = false; 3106 3107 newfs = kmalloc(sizeof(*newfs), GFP_KERNEL); 3108 if (newfs == NULL) 3109 return -ENOMEM; 3110 memcpy(&newfs->fs, fs, sizeof(newfs->fs)); 3111 3112 netdev_dbg(netdev, 3113 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", 3114 fs->flow_type, (int)fs->ring_cookie, fs->location, 3115 htonl(fs->h_u.tcp_ip4_spec.ip4src), 3116 htonl(fs->h_u.tcp_ip4_spec.ip4dst), 3117 htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst)); 3118 3119 spin_lock_irqsave(&bp->rx_fs_lock, flags); 3120 3121 /* find correct place to add in list */ 3122 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3123 if (item->fs.location > newfs->fs.location) { 3124 list_add_tail(&newfs->list, &item->list); 3125 added = true; 3126 break; 3127 } else if (item->fs.location == fs->location) { 3128 netdev_err(netdev, "Rule not added: location %d not free!\n", 3129 fs->location); 3130 ret = -EBUSY; 3131 goto err; 3132 } 3133 } 3134 if (!added) 3135 list_add_tail(&newfs->list, &bp->rx_fs_list.list); 3136 3137 gem_prog_cmp_regs(bp, fs); 3138 bp->rx_fs_list.count++; 3139 /* enable filtering if NTUPLE on */ 3140 gem_enable_flow_filters(bp, 1); 3141 3142 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3143 return 0; 3144 3145 err: 3146 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3147 kfree(newfs); 3148 return ret; 3149 } 3150 3151 static int gem_del_flow_filter(struct net_device *netdev, 3152 struct ethtool_rxnfc *cmd) 3153 { 3154 struct macb *bp = netdev_priv(netdev); 3155 struct ethtool_rx_fs_item *item; 3156 struct ethtool_rx_flow_spec *fs; 3157 unsigned long flags; 3158 3159 spin_lock_irqsave(&bp->rx_fs_lock, flags); 3160 3161 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3162 if 
(item->fs.location == cmd->fs.location) { 3163 /* disable screener regs for the flow entry */ 3164 fs = &(item->fs); 3165 netdev_dbg(netdev, 3166 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", 3167 fs->flow_type, (int)fs->ring_cookie, fs->location, 3168 htonl(fs->h_u.tcp_ip4_spec.ip4src), 3169 htonl(fs->h_u.tcp_ip4_spec.ip4dst), 3170 htons(fs->h_u.tcp_ip4_spec.psrc), 3171 htons(fs->h_u.tcp_ip4_spec.pdst)); 3172 3173 gem_writel_n(bp, SCRT2, fs->location, 0); 3174 3175 list_del(&item->list); 3176 bp->rx_fs_list.count--; 3177 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3178 kfree(item); 3179 return 0; 3180 } 3181 } 3182 3183 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3184 return -EINVAL; 3185 } 3186 3187 static int gem_get_flow_entry(struct net_device *netdev, 3188 struct ethtool_rxnfc *cmd) 3189 { 3190 struct macb *bp = netdev_priv(netdev); 3191 struct ethtool_rx_fs_item *item; 3192 3193 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3194 if (item->fs.location == cmd->fs.location) { 3195 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); 3196 return 0; 3197 } 3198 } 3199 return -EINVAL; 3200 } 3201 3202 static int gem_get_all_flow_entries(struct net_device *netdev, 3203 struct ethtool_rxnfc *cmd, u32 *rule_locs) 3204 { 3205 struct macb *bp = netdev_priv(netdev); 3206 struct ethtool_rx_fs_item *item; 3207 uint32_t cnt = 0; 3208 3209 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3210 if (cnt == cmd->rule_cnt) 3211 return -EMSGSIZE; 3212 rule_locs[cnt] = item->fs.location; 3213 cnt++; 3214 } 3215 cmd->data = bp->max_tuples; 3216 cmd->rule_cnt = cnt; 3217 3218 return 0; 3219 } 3220 3221 static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, 3222 u32 *rule_locs) 3223 { 3224 struct macb *bp = netdev_priv(netdev); 3225 int ret = 0; 3226 3227 switch (cmd->cmd) { 3228 case ETHTOOL_GRXRINGS: 3229 cmd->data = bp->num_queues; 3230 break; 3231 case ETHTOOL_GRXCLSRLCNT: 3232 cmd->rule_cnt = bp->rx_fs_list.count; 3233 break; 3234 case ETHTOOL_GRXCLSRULE: 3235 ret = gem_get_flow_entry(netdev, cmd); 3236 break; 3237 case ETHTOOL_GRXCLSRLALL: 3238 ret = gem_get_all_flow_entries(netdev, cmd, rule_locs); 3239 break; 3240 default: 3241 netdev_err(netdev, 3242 "Command parameter %d is not supported\n", cmd->cmd); 3243 ret = -EOPNOTSUPP; 3244 } 3245 3246 return ret; 3247 } 3248 3249 static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) 3250 { 3251 struct macb *bp = netdev_priv(netdev); 3252 int ret; 3253 3254 switch (cmd->cmd) { 3255 case ETHTOOL_SRXCLSRLINS: 3256 if ((cmd->fs.location >= bp->max_tuples) 3257 || (cmd->fs.ring_cookie >= bp->num_queues)) { 3258 ret = -EINVAL; 3259 break; 3260 } 3261 ret = gem_add_flow_filter(netdev, cmd); 3262 break; 3263 case ETHTOOL_SRXCLSRLDEL: 3264 ret = gem_del_flow_filter(netdev, cmd); 3265 break; 3266 default: 3267 netdev_err(netdev, 3268 "Command parameter %d is not supported\n", cmd->cmd); 3269 ret = -EOPNOTSUPP; 3270 } 3271 3272 return ret; 3273 } 3274 3275 static const struct ethtool_ops macb_ethtool_ops = { 3276 .get_regs_len = macb_get_regs_len, 3277 .get_regs = macb_get_regs, 3278 .get_link = ethtool_op_get_link, 3279 .get_ts_info = ethtool_op_get_ts_info, 3280 .get_wol = macb_get_wol, 3281 .set_wol = macb_set_wol, 3282 .get_link_ksettings = macb_get_link_ksettings, 3283 .set_link_ksettings = macb_set_link_ksettings, 3284 .get_ringparam = macb_get_ringparam, 3285 .set_ringparam = macb_set_ringparam, 3286 }; 3287 3288 static const struct ethtool_ops 
gem_ethtool_ops = { 3289 .get_regs_len = macb_get_regs_len, 3290 .get_regs = macb_get_regs, 3291 .get_link = ethtool_op_get_link, 3292 .get_ts_info = macb_get_ts_info, 3293 .get_ethtool_stats = gem_get_ethtool_stats, 3294 .get_strings = gem_get_ethtool_strings, 3295 .get_sset_count = gem_get_sset_count, 3296 .get_link_ksettings = macb_get_link_ksettings, 3297 .set_link_ksettings = macb_set_link_ksettings, 3298 .get_ringparam = macb_get_ringparam, 3299 .set_ringparam = macb_set_ringparam, 3300 .get_rxnfc = gem_get_rxnfc, 3301 .set_rxnfc = gem_set_rxnfc, 3302 }; 3303 3304 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 3305 { 3306 struct macb *bp = netdev_priv(dev); 3307 3308 if (!netif_running(dev)) 3309 return -EINVAL; 3310 3311 if (bp->ptp_info) { 3312 switch (cmd) { 3313 case SIOCSHWTSTAMP: 3314 return bp->ptp_info->set_hwtst(dev, rq, cmd); 3315 case SIOCGHWTSTAMP: 3316 return bp->ptp_info->get_hwtst(dev, rq); 3317 } 3318 } 3319 3320 return phylink_mii_ioctl(bp->phylink, rq, cmd); 3321 } 3322 3323 static inline void macb_set_txcsum_feature(struct macb *bp, 3324 netdev_features_t features) 3325 { 3326 u32 val; 3327 3328 if (!macb_is_gem(bp)) 3329 return; 3330 3331 val = gem_readl(bp, DMACFG); 3332 if (features & NETIF_F_HW_CSUM) 3333 val |= GEM_BIT(TXCOEN); 3334 else 3335 val &= ~GEM_BIT(TXCOEN); 3336 3337 gem_writel(bp, DMACFG, val); 3338 } 3339 3340 static inline void macb_set_rxcsum_feature(struct macb *bp, 3341 netdev_features_t features) 3342 { 3343 struct net_device *netdev = bp->dev; 3344 u32 val; 3345 3346 if (!macb_is_gem(bp)) 3347 return; 3348 3349 val = gem_readl(bp, NCFGR); 3350 if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC)) 3351 val |= GEM_BIT(RXCOEN); 3352 else 3353 val &= ~GEM_BIT(RXCOEN); 3354 3355 gem_writel(bp, NCFGR, val); 3356 } 3357 3358 static inline void macb_set_rxflow_feature(struct macb *bp, 3359 netdev_features_t features) 3360 { 3361 if (!macb_is_gem(bp)) 3362 return; 3363 3364 gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE)); 3365 } 3366 3367 static int macb_set_features(struct net_device *netdev, 3368 netdev_features_t features) 3369 { 3370 struct macb *bp = netdev_priv(netdev); 3371 netdev_features_t changed = features ^ netdev->features; 3372 3373 /* TX checksum offload */ 3374 if (changed & NETIF_F_HW_CSUM) 3375 macb_set_txcsum_feature(bp, features); 3376 3377 /* RX checksum offload */ 3378 if (changed & NETIF_F_RXCSUM) 3379 macb_set_rxcsum_feature(bp, features); 3380 3381 /* RX Flow Filters */ 3382 if (changed & NETIF_F_NTUPLE) 3383 macb_set_rxflow_feature(bp, features); 3384 3385 return 0; 3386 } 3387 3388 static void macb_restore_features(struct macb *bp) 3389 { 3390 struct net_device *netdev = bp->dev; 3391 netdev_features_t features = netdev->features; 3392 3393 /* TX checksum offload */ 3394 macb_set_txcsum_feature(bp, features); 3395 3396 /* RX checksum offload */ 3397 macb_set_rxcsum_feature(bp, features); 3398 3399 /* RX Flow Filters */ 3400 macb_set_rxflow_feature(bp, features); 3401 } 3402 3403 static const struct net_device_ops macb_netdev_ops = { 3404 .ndo_open = macb_open, 3405 .ndo_stop = macb_close, 3406 .ndo_start_xmit = macb_start_xmit, 3407 .ndo_set_rx_mode = macb_set_rx_mode, 3408 .ndo_get_stats = macb_get_stats, 3409 .ndo_do_ioctl = macb_ioctl, 3410 .ndo_validate_addr = eth_validate_addr, 3411 .ndo_change_mtu = macb_change_mtu, 3412 .ndo_set_mac_address = eth_mac_addr, 3413 #ifdef CONFIG_NET_POLL_CONTROLLER 3414 .ndo_poll_controller = macb_poll_controller, 3415 #endif 3416 
.ndo_set_features = macb_set_features, 3417 .ndo_features_check = macb_features_check, 3418 }; 3419 3420 /* Configure peripheral capabilities according to device tree 3421 * and integration options used 3422 */ 3423 static void macb_configure_caps(struct macb *bp, 3424 const struct macb_config *dt_conf) 3425 { 3426 u32 dcfg; 3427 3428 if (dt_conf) 3429 bp->caps = dt_conf->caps; 3430 3431 if (hw_is_gem(bp->regs, bp->native_io)) { 3432 bp->caps |= MACB_CAPS_MACB_IS_GEM; 3433 3434 dcfg = gem_readl(bp, DCFG1); 3435 if (GEM_BFEXT(IRQCOR, dcfg) == 0) 3436 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; 3437 dcfg = gem_readl(bp, DCFG2); 3438 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0) 3439 bp->caps |= MACB_CAPS_FIFO_MODE; 3440 #ifdef CONFIG_MACB_USE_HWSTAMP 3441 if (gem_has_ptp(bp)) { 3442 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) 3443 dev_err(&bp->pdev->dev, 3444 "GEM doesn't support hardware ptp.\n"); 3445 else { 3446 bp->hw_dma_cap |= HW_DMA_CAP_PTP; 3447 bp->ptp_info = &gem_ptp_info; 3448 } 3449 } 3450 #endif 3451 } 3452 3453 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); 3454 } 3455 3456 static void macb_probe_queues(void __iomem *mem, 3457 bool native_io, 3458 unsigned int *queue_mask, 3459 unsigned int *num_queues) 3460 { 3461 unsigned int hw_q; 3462 3463 *queue_mask = 0x1; 3464 *num_queues = 1; 3465 3466 /* is it macb or gem ? 3467 * 3468 * We need to read directly from the hardware here because 3469 * we are early in the probe process and don't have the 3470 * MACB_CAPS_MACB_IS_GEM flag positioned 3471 */ 3472 if (!hw_is_gem(mem, native_io)) 3473 return; 3474 3475 /* bit 0 is never set but queue 0 always exists */ 3476 *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff; 3477 3478 *queue_mask |= 0x1; 3479 3480 for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) 3481 if (*queue_mask & (1 << hw_q)) 3482 (*num_queues)++; 3483 } 3484 3485 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, 3486 struct clk **hclk, struct clk **tx_clk, 3487 struct clk **rx_clk, struct clk **tsu_clk) 3488 { 3489 struct macb_platform_data *pdata; 3490 int err; 3491 3492 pdata = dev_get_platdata(&pdev->dev); 3493 if (pdata) { 3494 *pclk = pdata->pclk; 3495 *hclk = pdata->hclk; 3496 } else { 3497 *pclk = devm_clk_get(&pdev->dev, "pclk"); 3498 *hclk = devm_clk_get(&pdev->dev, "hclk"); 3499 } 3500 3501 if (IS_ERR_OR_NULL(*pclk)) { 3502 err = PTR_ERR(*pclk); 3503 if (!err) 3504 err = -ENODEV; 3505 3506 dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err); 3507 return err; 3508 } 3509 3510 if (IS_ERR_OR_NULL(*hclk)) { 3511 err = PTR_ERR(*hclk); 3512 if (!err) 3513 err = -ENODEV; 3514 3515 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err); 3516 return err; 3517 } 3518 3519 *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk"); 3520 if (IS_ERR(*tx_clk)) 3521 return PTR_ERR(*tx_clk); 3522 3523 *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk"); 3524 if (IS_ERR(*rx_clk)) 3525 return PTR_ERR(*rx_clk); 3526 3527 *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk"); 3528 if (IS_ERR(*tsu_clk)) 3529 return PTR_ERR(*tsu_clk); 3530 3531 err = clk_prepare_enable(*pclk); 3532 if (err) { 3533 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); 3534 return err; 3535 } 3536 3537 err = clk_prepare_enable(*hclk); 3538 if (err) { 3539 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err); 3540 goto err_disable_pclk; 3541 } 3542 3543 err = clk_prepare_enable(*tx_clk); 3544 if (err) { 3545 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); 3546 goto 
err_disable_hclk; 3547 } 3548 3549 err = clk_prepare_enable(*rx_clk); 3550 if (err) { 3551 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); 3552 goto err_disable_txclk; 3553 } 3554 3555 err = clk_prepare_enable(*tsu_clk); 3556 if (err) { 3557 dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err); 3558 goto err_disable_rxclk; 3559 } 3560 3561 return 0; 3562 3563 err_disable_rxclk: 3564 clk_disable_unprepare(*rx_clk); 3565 3566 err_disable_txclk: 3567 clk_disable_unprepare(*tx_clk); 3568 3569 err_disable_hclk: 3570 clk_disable_unprepare(*hclk); 3571 3572 err_disable_pclk: 3573 clk_disable_unprepare(*pclk); 3574 3575 return err; 3576 } 3577 3578 static int macb_init(struct platform_device *pdev) 3579 { 3580 struct net_device *dev = platform_get_drvdata(pdev); 3581 unsigned int hw_q, q; 3582 struct macb *bp = netdev_priv(dev); 3583 struct macb_queue *queue; 3584 int err; 3585 u32 val, reg; 3586 3587 bp->tx_ring_size = DEFAULT_TX_RING_SIZE; 3588 bp->rx_ring_size = DEFAULT_RX_RING_SIZE; 3589 3590 /* set the queue register mapping once for all: queue0 has a special 3591 * register mapping but we don't want to test the queue index then 3592 * compute the corresponding register offset at run time. 3593 */ 3594 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { 3595 if (!(bp->queue_mask & (1 << hw_q))) 3596 continue; 3597 3598 queue = &bp->queues[q]; 3599 queue->bp = bp; 3600 netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT); 3601 if (hw_q) { 3602 queue->ISR = GEM_ISR(hw_q - 1); 3603 queue->IER = GEM_IER(hw_q - 1); 3604 queue->IDR = GEM_IDR(hw_q - 1); 3605 queue->IMR = GEM_IMR(hw_q - 1); 3606 queue->TBQP = GEM_TBQP(hw_q - 1); 3607 queue->RBQP = GEM_RBQP(hw_q - 1); 3608 queue->RBQS = GEM_RBQS(hw_q - 1); 3609 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3610 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 3611 queue->TBQPH = GEM_TBQPH(hw_q - 1); 3612 queue->RBQPH = GEM_RBQPH(hw_q - 1); 3613 } 3614 #endif 3615 } else { 3616 /* queue0 uses legacy registers */ 3617 queue->ISR = MACB_ISR; 3618 queue->IER = MACB_IER; 3619 queue->IDR = MACB_IDR; 3620 queue->IMR = MACB_IMR; 3621 queue->TBQP = MACB_TBQP; 3622 queue->RBQP = MACB_RBQP; 3623 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3624 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 3625 queue->TBQPH = MACB_TBQPH; 3626 queue->RBQPH = MACB_RBQPH; 3627 } 3628 #endif 3629 } 3630 3631 /* get irq: here we use the linux queue index, not the hardware 3632 * queue index. the queue irq definitions in the device tree 3633 * must remove the optional gaps that could exist in the 3634 * hardware queue mask. 
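 * For example, a hardware queue mask of 0x5 (hardware queues 0 and 2)
 * requires exactly two interrupts in the device tree; they are mapped
 * to Linux queues 0 and 1 here.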
3635 */ 3636 queue->irq = platform_get_irq(pdev, q); 3637 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, 3638 IRQF_SHARED, dev->name, queue); 3639 if (err) { 3640 dev_err(&pdev->dev, 3641 "Unable to request IRQ %d (error %d)\n", 3642 queue->irq, err); 3643 return err; 3644 } 3645 3646 INIT_WORK(&queue->tx_error_task, macb_tx_error_task); 3647 q++; 3648 } 3649 3650 dev->netdev_ops = &macb_netdev_ops; 3651 3652 /* setup appropriated routines according to adapter type */ 3653 if (macb_is_gem(bp)) { 3654 bp->max_tx_length = GEM_MAX_TX_LEN; 3655 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; 3656 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; 3657 bp->macbgem_ops.mog_init_rings = gem_init_rings; 3658 bp->macbgem_ops.mog_rx = gem_rx; 3659 dev->ethtool_ops = &gem_ethtool_ops; 3660 } else { 3661 bp->max_tx_length = MACB_MAX_TX_LEN; 3662 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; 3663 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; 3664 bp->macbgem_ops.mog_init_rings = macb_init_rings; 3665 bp->macbgem_ops.mog_rx = macb_rx; 3666 dev->ethtool_ops = &macb_ethtool_ops; 3667 } 3668 3669 /* Set features */ 3670 dev->hw_features = NETIF_F_SG; 3671 3672 /* Check LSO capability */ 3673 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) 3674 dev->hw_features |= MACB_NETIF_LSO; 3675 3676 /* Checksum offload is only available on gem with packet buffer */ 3677 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) 3678 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 3679 if (bp->caps & MACB_CAPS_SG_DISABLED) 3680 dev->hw_features &= ~NETIF_F_SG; 3681 dev->features = dev->hw_features; 3682 3683 /* Check RX Flow Filters support. 3684 * Max Rx flows set by availability of screeners & compare regs: 3685 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs 3686 */ 3687 reg = gem_readl(bp, DCFG8); 3688 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), 3689 GEM_BFEXT(T2SCR, reg)); 3690 if (bp->max_tuples > 0) { 3691 /* also needs one ethtype match to check IPv4 */ 3692 if (GEM_BFEXT(SCR2ETH, reg) > 0) { 3693 /* program this reg now */ 3694 reg = 0; 3695 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg); 3696 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg); 3697 /* Filtering is supported in hw but don't enable it in kernel now */ 3698 dev->hw_features |= NETIF_F_NTUPLE; 3699 /* init Rx flow definitions */ 3700 INIT_LIST_HEAD(&bp->rx_fs_list.list); 3701 bp->rx_fs_list.count = 0; 3702 spin_lock_init(&bp->rx_fs_lock); 3703 } else 3704 bp->max_tuples = 0; 3705 } 3706 3707 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { 3708 val = 0; 3709 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) 3710 val = GEM_BIT(RGMII); 3711 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && 3712 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 3713 val = MACB_BIT(RMII); 3714 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 3715 val = MACB_BIT(MII); 3716 3717 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) 3718 val |= MACB_BIT(CLKEN); 3719 3720 macb_or_gem_writel(bp, USRIO, val); 3721 } 3722 3723 /* Set MII management clock divider */ 3724 val = macb_mdc_clk_div(bp); 3725 val |= macb_dbw(bp); 3726 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) 3727 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 3728 macb_writel(bp, NCFGR, val); 3729 3730 return 0; 3731 } 3732 3733 #if defined(CONFIG_OF) 3734 /* 1518 rounded up */ 3735 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3736 /* max number of receive buffers */ 3737 #define AT91ETHER_MAX_RX_DESCR 9 3738 3739 static 
struct sifive_fu540_macb_mgmt *mgmt; 3740 3741 /* Initialize and start the Receiver and Transmit subsystems */ 3742 static int at91ether_start(struct net_device *dev) 3743 { 3744 struct macb *lp = netdev_priv(dev); 3745 struct macb_queue *q = &lp->queues[0]; 3746 struct macb_dma_desc *desc; 3747 dma_addr_t addr; 3748 u32 ctl; 3749 int i; 3750 3751 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, 3752 (AT91ETHER_MAX_RX_DESCR * 3753 macb_dma_desc_get_size(lp)), 3754 &q->rx_ring_dma, GFP_KERNEL); 3755 if (!q->rx_ring) 3756 return -ENOMEM; 3757 3758 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, 3759 AT91ETHER_MAX_RX_DESCR * 3760 AT91ETHER_MAX_RBUFF_SZ, 3761 &q->rx_buffers_dma, GFP_KERNEL); 3762 if (!q->rx_buffers) { 3763 dma_free_coherent(&lp->pdev->dev, 3764 AT91ETHER_MAX_RX_DESCR * 3765 macb_dma_desc_get_size(lp), 3766 q->rx_ring, q->rx_ring_dma); 3767 q->rx_ring = NULL; 3768 return -ENOMEM; 3769 } 3770 3771 addr = q->rx_buffers_dma; 3772 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { 3773 desc = macb_rx_desc(q, i); 3774 macb_set_addr(lp, desc, addr); 3775 desc->ctrl = 0; 3776 addr += AT91ETHER_MAX_RBUFF_SZ; 3777 } 3778 3779 /* Set the Wrap bit on the last descriptor */ 3780 desc->addr |= MACB_BIT(RX_WRAP); 3781 3782 /* Reset buffer index */ 3783 q->rx_tail = 0; 3784 3785 /* Program address of descriptor list in Rx Buffer Queue register */ 3786 macb_writel(lp, RBQP, q->rx_ring_dma); 3787 3788 /* Enable Receive and Transmit */ 3789 ctl = macb_readl(lp, NCR); 3790 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); 3791 3792 return 0; 3793 } 3794 3795 /* Open the ethernet interface */ 3796 static int at91ether_open(struct net_device *dev) 3797 { 3798 struct macb *lp = netdev_priv(dev); 3799 u32 ctl; 3800 int ret; 3801 3802 ret = pm_runtime_get_sync(&lp->pdev->dev); 3803 if (ret < 0) 3804 return ret; 3805 3806 /* Clear internal statistics */ 3807 ctl = macb_readl(lp, NCR); 3808 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); 3809 3810 macb_set_hwaddr(lp); 3811 3812 ret = at91ether_start(dev); 3813 if (ret) 3814 return ret; 3815 3816 /* Enable MAC interrupts */ 3817 macb_writel(lp, IER, MACB_BIT(RCOMP) | 3818 MACB_BIT(RXUBR) | 3819 MACB_BIT(ISR_TUND) | 3820 MACB_BIT(ISR_RLE) | 3821 MACB_BIT(TCOMP) | 3822 MACB_BIT(ISR_ROVR) | 3823 MACB_BIT(HRESP)); 3824 3825 ret = macb_phylink_connect(lp); 3826 if (ret) 3827 return ret; 3828 3829 netif_start_queue(dev); 3830 3831 return 0; 3832 } 3833 3834 /* Close the interface */ 3835 static int at91ether_close(struct net_device *dev) 3836 { 3837 struct macb *lp = netdev_priv(dev); 3838 struct macb_queue *q = &lp->queues[0]; 3839 u32 ctl; 3840 3841 /* Disable Receiver and Transmitter */ 3842 ctl = macb_readl(lp, NCR); 3843 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); 3844 3845 /* Disable MAC interrupts */ 3846 macb_writel(lp, IDR, MACB_BIT(RCOMP) | 3847 MACB_BIT(RXUBR) | 3848 MACB_BIT(ISR_TUND) | 3849 MACB_BIT(ISR_RLE) | 3850 MACB_BIT(TCOMP) | 3851 MACB_BIT(ISR_ROVR) | 3852 MACB_BIT(HRESP)); 3853 3854 netif_stop_queue(dev); 3855 3856 phylink_stop(lp->phylink); 3857 phylink_disconnect_phy(lp->phylink); 3858 3859 dma_free_coherent(&lp->pdev->dev, 3860 AT91ETHER_MAX_RX_DESCR * 3861 macb_dma_desc_get_size(lp), 3862 q->rx_ring, q->rx_ring_dma); 3863 q->rx_ring = NULL; 3864 3865 dma_free_coherent(&lp->pdev->dev, 3866 AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ, 3867 q->rx_buffers, q->rx_buffers_dma); 3868 q->rx_buffers = NULL; 3869 3870 return pm_runtime_put(&lp->pdev->dev); 3871 } 3872 3873 /* Transmit packet */ 3874 static netdev_tx_t 
at91ether_start_xmit(struct sk_buff *skb, 3875 struct net_device *dev) 3876 { 3877 struct macb *lp = netdev_priv(dev); 3878 3879 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { 3880 netif_stop_queue(dev); 3881 3882 /* Store packet information (to free when Tx completed) */ 3883 lp->skb = skb; 3884 lp->skb_length = skb->len; 3885 lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data, 3886 skb->len, DMA_TO_DEVICE); 3887 if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) { 3888 dev_kfree_skb_any(skb); 3889 dev->stats.tx_dropped++; 3890 netdev_err(dev, "%s: DMA mapping error\n", __func__); 3891 return NETDEV_TX_OK; 3892 } 3893 3894 /* Set address of the data in the Transmit Address register */ 3895 macb_writel(lp, TAR, lp->skb_physaddr); 3896 /* Set length of the packet in the Transmit Control register */ 3897 macb_writel(lp, TCR, skb->len); 3898 3899 } else { 3900 netdev_err(dev, "%s called, but device is busy!\n", __func__); 3901 return NETDEV_TX_BUSY; 3902 } 3903 3904 return NETDEV_TX_OK; 3905 } 3906 3907 /* Extract received frame from buffer descriptors and sent to upper layers. 3908 * (Called from interrupt context) 3909 */ 3910 static void at91ether_rx(struct net_device *dev) 3911 { 3912 struct macb *lp = netdev_priv(dev); 3913 struct macb_queue *q = &lp->queues[0]; 3914 struct macb_dma_desc *desc; 3915 unsigned char *p_recv; 3916 struct sk_buff *skb; 3917 unsigned int pktlen; 3918 3919 desc = macb_rx_desc(q, q->rx_tail); 3920 while (desc->addr & MACB_BIT(RX_USED)) { 3921 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ; 3922 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl); 3923 skb = netdev_alloc_skb(dev, pktlen + 2); 3924 if (skb) { 3925 skb_reserve(skb, 2); 3926 skb_put_data(skb, p_recv, pktlen); 3927 3928 skb->protocol = eth_type_trans(skb, dev); 3929 dev->stats.rx_packets++; 3930 dev->stats.rx_bytes += pktlen; 3931 netif_rx(skb); 3932 } else { 3933 dev->stats.rx_dropped++; 3934 } 3935 3936 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH)) 3937 dev->stats.multicast++; 3938 3939 /* reset ownership bit */ 3940 desc->addr &= ~MACB_BIT(RX_USED); 3941 3942 /* wrap after last buffer */ 3943 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) 3944 q->rx_tail = 0; 3945 else 3946 q->rx_tail++; 3947 3948 desc = macb_rx_desc(q, q->rx_tail); 3949 } 3950 } 3951 3952 /* MAC interrupt handler */ 3953 static irqreturn_t at91ether_interrupt(int irq, void *dev_id) 3954 { 3955 struct net_device *dev = dev_id; 3956 struct macb *lp = netdev_priv(dev); 3957 u32 intstatus, ctl; 3958 3959 /* MAC Interrupt Status register indicates what interrupts are pending. 3960 * It is automatically cleared once read. 
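 * All sources reported by this single read must therefore be handled
 * before returning.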
3961 */ 3962 intstatus = macb_readl(lp, ISR); 3963 3964 /* Receive complete */ 3965 if (intstatus & MACB_BIT(RCOMP)) 3966 at91ether_rx(dev); 3967 3968 /* Transmit complete */ 3969 if (intstatus & MACB_BIT(TCOMP)) { 3970 /* The TCOM bit is set even if the transmission failed */ 3971 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) 3972 dev->stats.tx_errors++; 3973 3974 if (lp->skb) { 3975 dev_consume_skb_irq(lp->skb); 3976 lp->skb = NULL; 3977 dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr, 3978 lp->skb_length, DMA_TO_DEVICE); 3979 dev->stats.tx_packets++; 3980 dev->stats.tx_bytes += lp->skb_length; 3981 } 3982 netif_wake_queue(dev); 3983 } 3984 3985 /* Work-around for EMAC Errata section 41.3.1 */ 3986 if (intstatus & MACB_BIT(RXUBR)) { 3987 ctl = macb_readl(lp, NCR); 3988 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); 3989 wmb(); 3990 macb_writel(lp, NCR, ctl | MACB_BIT(RE)); 3991 } 3992 3993 if (intstatus & MACB_BIT(ISR_ROVR)) 3994 netdev_err(dev, "ROVR error\n"); 3995 3996 return IRQ_HANDLED; 3997 } 3998 3999 #ifdef CONFIG_NET_POLL_CONTROLLER 4000 static void at91ether_poll_controller(struct net_device *dev) 4001 { 4002 unsigned long flags; 4003 4004 local_irq_save(flags); 4005 at91ether_interrupt(dev->irq, dev); 4006 local_irq_restore(flags); 4007 } 4008 #endif 4009 4010 static const struct net_device_ops at91ether_netdev_ops = { 4011 .ndo_open = at91ether_open, 4012 .ndo_stop = at91ether_close, 4013 .ndo_start_xmit = at91ether_start_xmit, 4014 .ndo_get_stats = macb_get_stats, 4015 .ndo_set_rx_mode = macb_set_rx_mode, 4016 .ndo_set_mac_address = eth_mac_addr, 4017 .ndo_do_ioctl = macb_ioctl, 4018 .ndo_validate_addr = eth_validate_addr, 4019 #ifdef CONFIG_NET_POLL_CONTROLLER 4020 .ndo_poll_controller = at91ether_poll_controller, 4021 #endif 4022 }; 4023 4024 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, 4025 struct clk **hclk, struct clk **tx_clk, 4026 struct clk **rx_clk, struct clk **tsu_clk) 4027 { 4028 int err; 4029 4030 *hclk = NULL; 4031 *tx_clk = NULL; 4032 *rx_clk = NULL; 4033 *tsu_clk = NULL; 4034 4035 *pclk = devm_clk_get(&pdev->dev, "ether_clk"); 4036 if (IS_ERR(*pclk)) 4037 return PTR_ERR(*pclk); 4038 4039 err = clk_prepare_enable(*pclk); 4040 if (err) { 4041 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); 4042 return err; 4043 } 4044 4045 return 0; 4046 } 4047 4048 static int at91ether_init(struct platform_device *pdev) 4049 { 4050 struct net_device *dev = platform_get_drvdata(pdev); 4051 struct macb *bp = netdev_priv(dev); 4052 int err; 4053 4054 bp->queues[0].bp = bp; 4055 4056 dev->netdev_ops = &at91ether_netdev_ops; 4057 dev->ethtool_ops = &macb_ethtool_ops; 4058 4059 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 4060 0, dev->name, dev); 4061 if (err) 4062 return err; 4063 4064 macb_writel(bp, NCR, 0); 4065 4066 macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG)); 4067 4068 return 0; 4069 } 4070 4071 static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw, 4072 unsigned long parent_rate) 4073 { 4074 return mgmt->rate; 4075 } 4076 4077 static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate, 4078 unsigned long *parent_rate) 4079 { 4080 if (WARN_ON(rate < 2500000)) 4081 return 2500000; 4082 else if (rate == 2500000) 4083 return 2500000; 4084 else if (WARN_ON(rate < 13750000)) 4085 return 2500000; 4086 else if (WARN_ON(rate < 25000000)) 4087 return 25000000; 4088 else if (rate == 25000000) 4089 return 25000000; 4090 else if (WARN_ON(rate < 75000000)) 4091 
static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long *parent_rate)
{
	if (WARN_ON(rate < 2500000))
		return 2500000;
	else if (rate == 2500000)
		return 2500000;
	else if (WARN_ON(rate < 13750000))
		return 2500000;
	else if (WARN_ON(rate < 25000000))
		return 25000000;
	else if (rate == 25000000)
		return 25000000;
	else if (WARN_ON(rate < 75000000))
		return 25000000;
	else if (WARN_ON(rate < 125000000))
		return 125000000;
	else if (rate == 125000000)
		return 125000000;

	WARN_ON(rate > 125000000);

	return 125000000;
}

static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
	if (rate != 125000000)
		iowrite32(1, mgmt->reg);
	else
		iowrite32(0, mgmt->reg);
	mgmt->rate = rate;

	return 0;
}

static const struct clk_ops fu540_c000_ops = {
	.recalc_rate = fu540_macb_tx_recalc_rate,
	.round_rate = fu540_macb_tx_round_rate,
	.set_rate = fu540_macb_tx_set_rate,
};

static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
			       struct clk **hclk, struct clk **tx_clk,
			       struct clk **rx_clk, struct clk **tsu_clk)
{
	struct clk_init_data init;
	int err = 0;

	err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
	if (err)
		return err;

	mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
	if (!mgmt)
		return -ENOMEM;

	init.name = "sifive-gemgxl-mgmt";
	init.ops = &fu540_c000_ops;
	init.flags = 0;
	init.num_parents = 0;

	mgmt->rate = 0;
	mgmt->hw.init = &init;

	*tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
	if (IS_ERR(*tx_clk))
		return PTR_ERR(*tx_clk);

	err = clk_prepare_enable(*tx_clk);
	if (err)
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
	else
		dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);

	return 0;
}

static int fu540_c000_init(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	mgmt->reg = ioremap(res->start, resource_size(res));
	if (!mgmt->reg)
		return -ENOMEM;

	return macb_init(pdev);
}

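/* Per-SoC configuration data: capability flags, DMA burst length and the
 * clock/hardware init callbacks that macb_probe() picks up through the OF
 * match table below.
 */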
static const struct macb_config fu540_c000_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = fu540_c000_clk_init,
	.init = fu540_c000_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3macb_config = {
	.caps = MACB_CAPS_SG_DISABLED
	      | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d2_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config sama5d4_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config emac_config = {
	.caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
};

static const struct macb_config np4_config = {
	.caps = MACB_CAPS_USRIO_DISABLED,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config zynqmp_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config zynq_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
		MACB_CAPS_NEEDS_RSTONUBR,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config },
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
	{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */

static const struct macb_config default_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static int macb_probe(struct platform_device *pdev)
{
	const struct macb_config *macb_config = &default_gem_config;
	int (*clk_init)(struct platform_device *, struct clk **,
			struct clk **, struct clk **, struct clk **,
			struct clk **) = macb_config->clk_init;
	int (*init)(struct platform_device *) = macb_config->init;
	struct device_node *np = pdev->dev.of_node;
	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
	struct clk *tsu_clk = NULL;
	unsigned int queue_mask, num_queues;
	bool native_io;
	phy_interface_t interface;
	struct net_device *dev;
	struct resource *regs;
	void __iomem *mem;
	const char *mac;
	struct macb *bp;
	int err, val;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (np) {
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, np);
		if (match && match->data) {
			macb_config = match->data;
			clk_init = macb_config->clk_init;
			init = macb_config->init;
		}
	}

	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
	if (err)
		return err;

	pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	native_io = hw_is_native_io(mem);

	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	bp->rx_clk = rx_clk;
	bp->tsu_clk = tsu_clk;
	if (macb_config)
		bp->jumbo_max_len = macb_config->jumbo_max_len;

	bp->wol = 0;
	if (of_get_property(np, "magic-packet", NULL))
		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

	spin_lock_init(&bp->lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

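	/* If the GEM design reports 64-bit DMA addressing (DAW64 in DCFG6),
	 * widen the DMA mask to 44 bits and switch to the extended 64-bit
	 * buffer descriptor layout.
	 */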
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
		bp->hw_dma_cap |= HW_DMA_CAP_64B;
	}
#endif
	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	/* MTU range: 68 - 1500 or 10240 */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if (bp->caps & MACB_CAPS_JUMBO)
		dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = ETH_DATA_LEN;

	if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
		val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);

		val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);
	}

	bp->rx_intr_mask = MACB_RX_INT_FLAGS;
	if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
		bp->rx_intr_mask |= MACB_BIT(RXUBR);

	mac = of_get_mac_address(np);
	if (PTR_ERR(mac) == -EPROBE_DEFER) {
		err = -EPROBE_DEFER;
		goto err_out_free_netdev;
	} else if (!IS_ERR_OR_NULL(mac)) {
		ether_addr_copy(bp->dev->dev_addr, mac);
	} else {
		macb_get_hwaddr(bp);
	}

	err = of_get_phy_mode(np, &interface);
	if (err)
		/* not found in DT, MII by default */
		bp->phy_interface = PHY_INTERFACE_MODE_MII;
	else
		bp->phy_interface = interface;

	bp->speed = SPEED_UNKNOWN;

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
		     (unsigned long)bp);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);

	return 0;

err_out_unregister_mdio:
	mdiobus_unregister(bp->mii_bus);
	mdiobus_free(bp->mii_bus);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);
	clk_disable_unprepare(tsu_clk);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	return err;
}

static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);

		unregister_netdev(dev);
		tasklet_kill(&bp->hresp_err_tasklet);
		pm_runtime_disable(&pdev->dev);
		pm_runtime_dont_use_autosuspend(&pdev->dev);
		if (!pm_runtime_suspended(&pdev->dev)) {
			clk_disable_unprepare(bp->tx_clk);
			clk_disable_unprepare(bp->hclk);
			clk_disable_unprepare(bp->pclk);
			clk_disable_unprepare(bp->rx_clk);
			clk_disable_unprepare(bp->tsu_clk);
			pm_runtime_set_suspended(&pdev->dev);
		}
		phylink_destroy(bp->phylink);
		free_netdev(dev);
	}

	return 0;
}

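/* System sleep: if Wake-on-LAN is armed, keep the controller powered and
 * enable the magic-packet interrupt as a wake source; otherwise detach the
 * device, stop NAPI and phylink, reset the hardware and save the registers
 * (USRIO, SCRT2) that macb_resume() restores.
 */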
static int __maybe_unused macb_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue = bp->queues;
	unsigned long flags;
	unsigned int q;

	if (!netif_running(netdev))
		return 0;

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
		netif_device_detach(netdev);
	} else {
		netif_device_detach(netdev);
		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue)
			napi_disable(&queue->napi);
		rtnl_lock();
		phylink_stop(bp->phylink);
		rtnl_unlock();
		spin_lock_irqsave(&bp->lock, flags);
		macb_reset_hw(bp);
		spin_unlock_irqrestore(&bp->lock, flags);

		if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
			bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);

		if (netdev->hw_features & NETIF_F_NTUPLE)
			bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
	}

	netif_carrier_off(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(netdev);
	pm_runtime_force_suspend(dev);

	return 0;
}

static int __maybe_unused macb_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue = bp->queues;
	unsigned int q;

	if (!netif_running(netdev))
		return 0;

	pm_runtime_force_resume(dev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		macb_writel(bp, NCR, MACB_BIT(MPE));

		if (netdev->hw_features & NETIF_F_NTUPLE)
			gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);

		if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
			macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);

		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue)
			napi_enable(&queue->napi);
		rtnl_lock();
		phylink_start(bp->phylink);
		rtnl_unlock();
	}

	macb_init_hw(bp);
	macb_set_rx_mode(netdev);
	macb_restore_features(bp);
	netif_device_attach(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_init(netdev);

	return 0;
}

static int __maybe_unused macb_runtime_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(&bp->dev->dev))) {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}
	clk_disable_unprepare(bp->tsu_clk);

	return 0;
}

static int __maybe_unused macb_runtime_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(&bp->dev->dev))) {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}
	clk_prepare_enable(bp->tsu_clk);

	return 0;
}

static const struct dev_pm_ops macb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
	SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
};

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm	= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");