// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include "macb.h"

/* This structure is only used for MACB on SiFive FU540 devices */
struct sifive_fu540_macb_mgmt {
	void __iomem *reg;
	unsigned long rate;
	struct clk_hw hw;
};

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */

#define DEFAULT_RX_RING_SIZE	512	/* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512	/* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
					| MACB_BIT(ISR_RLE)		\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
					| MACB_BIT(TXUBR))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a
 * false amba_error in TX path from the DMA assuming there is not enough
 * space in the SRAM (16KB) even when there is.
 */
#define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230

#define MACB_PM_TIMEOUT		100	/* ms */

#define MACB_MDIO_TIMEOUT	1000000	/* in usecs */

/* The DMA buffer descriptor may have a different size, depending on the
 * hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3.
dma address width 32 bits with hardware timestamping: 111 * word 1: 32 bit address of Data Buffer 112 * word 2: control 113 * word 3: timestamp word 1 114 * word 4: timestamp word 2 115 * 116 * 4. dma address width 64 bits with hardware timestamping: 117 * word 1: 32 bit address of Data Buffer 118 * word 2: control 119 * word 3: upper 32 bit address of Data Buffer 120 * word 4: unused 121 * word 5: timestamp word 1 122 * word 6: timestamp word 2 123 */ 124 static unsigned int macb_dma_desc_get_size(struct macb *bp) 125 { 126 #ifdef MACB_EXT_DESC 127 unsigned int desc_size; 128 129 switch (bp->hw_dma_cap) { 130 case HW_DMA_CAP_64B: 131 desc_size = sizeof(struct macb_dma_desc) 132 + sizeof(struct macb_dma_desc_64); 133 break; 134 case HW_DMA_CAP_PTP: 135 desc_size = sizeof(struct macb_dma_desc) 136 + sizeof(struct macb_dma_desc_ptp); 137 break; 138 case HW_DMA_CAP_64B_PTP: 139 desc_size = sizeof(struct macb_dma_desc) 140 + sizeof(struct macb_dma_desc_64) 141 + sizeof(struct macb_dma_desc_ptp); 142 break; 143 default: 144 desc_size = sizeof(struct macb_dma_desc); 145 } 146 return desc_size; 147 #endif 148 return sizeof(struct macb_dma_desc); 149 } 150 151 static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx) 152 { 153 #ifdef MACB_EXT_DESC 154 switch (bp->hw_dma_cap) { 155 case HW_DMA_CAP_64B: 156 case HW_DMA_CAP_PTP: 157 desc_idx <<= 1; 158 break; 159 case HW_DMA_CAP_64B_PTP: 160 desc_idx *= 3; 161 break; 162 default: 163 break; 164 } 165 #endif 166 return desc_idx; 167 } 168 169 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 170 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc) 171 { 172 return (struct macb_dma_desc_64 *)((void *)desc 173 + sizeof(struct macb_dma_desc)); 174 } 175 #endif 176 177 /* Ring buffer accessors */ 178 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) 179 { 180 return index & (bp->tx_ring_size - 1); 181 } 182 183 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, 184 unsigned int index) 185 { 186 index = macb_tx_ring_wrap(queue->bp, index); 187 index = macb_adj_dma_desc_idx(queue->bp, index); 188 return &queue->tx_ring[index]; 189 } 190 191 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, 192 unsigned int index) 193 { 194 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)]; 195 } 196 197 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index) 198 { 199 dma_addr_t offset; 200 201 offset = macb_tx_ring_wrap(queue->bp, index) * 202 macb_dma_desc_get_size(queue->bp); 203 204 return queue->tx_ring_dma + offset; 205 } 206 207 static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index) 208 { 209 return index & (bp->rx_ring_size - 1); 210 } 211 212 static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index) 213 { 214 index = macb_rx_ring_wrap(queue->bp, index); 215 index = macb_adj_dma_desc_idx(queue->bp, index); 216 return &queue->rx_ring[index]; 217 } 218 219 static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index) 220 { 221 return queue->rx_buffers + queue->bp->rx_buffer_size * 222 macb_rx_ring_wrap(queue->bp, index); 223 } 224 225 /* I/O accessors */ 226 static u32 hw_readl_native(struct macb *bp, int offset) 227 { 228 return __raw_readl(bp->regs + offset); 229 } 230 231 static void hw_writel_native(struct macb *bp, int offset, u32 value) 232 { 233 __raw_writel(value, bp->regs + offset); 234 } 235 236 static u32 hw_readl(struct macb *bp, int offset) 237 { 238 return 
readl_relaxed(bp->regs + offset); 239 } 240 241 static void hw_writel(struct macb *bp, int offset, u32 value) 242 { 243 writel_relaxed(value, bp->regs + offset); 244 } 245 246 /* Find the CPU endianness by using the loopback bit of NCR register. When the 247 * CPU is in big endian we need to program swapped mode for management 248 * descriptor access. 249 */ 250 static bool hw_is_native_io(void __iomem *addr) 251 { 252 u32 value = MACB_BIT(LLB); 253 254 __raw_writel(value, addr + MACB_NCR); 255 value = __raw_readl(addr + MACB_NCR); 256 257 /* Write 0 back to disable everything */ 258 __raw_writel(0, addr + MACB_NCR); 259 260 return value == MACB_BIT(LLB); 261 } 262 263 static bool hw_is_gem(void __iomem *addr, bool native_io) 264 { 265 u32 id; 266 267 if (native_io) 268 id = __raw_readl(addr + MACB_MID); 269 else 270 id = readl_relaxed(addr + MACB_MID); 271 272 return MACB_BFEXT(IDNUM, id) >= 0x2; 273 } 274 275 static void macb_set_hwaddr(struct macb *bp) 276 { 277 u32 bottom; 278 u16 top; 279 280 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); 281 macb_or_gem_writel(bp, SA1B, bottom); 282 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); 283 macb_or_gem_writel(bp, SA1T, top); 284 285 /* Clear unused address register sets */ 286 macb_or_gem_writel(bp, SA2B, 0); 287 macb_or_gem_writel(bp, SA2T, 0); 288 macb_or_gem_writel(bp, SA3B, 0); 289 macb_or_gem_writel(bp, SA3T, 0); 290 macb_or_gem_writel(bp, SA4B, 0); 291 macb_or_gem_writel(bp, SA4T, 0); 292 } 293 294 static void macb_get_hwaddr(struct macb *bp) 295 { 296 u32 bottom; 297 u16 top; 298 u8 addr[6]; 299 int i; 300 301 /* Check all 4 address register for valid address */ 302 for (i = 0; i < 4; i++) { 303 bottom = macb_or_gem_readl(bp, SA1B + i * 8); 304 top = macb_or_gem_readl(bp, SA1T + i * 8); 305 306 addr[0] = bottom & 0xff; 307 addr[1] = (bottom >> 8) & 0xff; 308 addr[2] = (bottom >> 16) & 0xff; 309 addr[3] = (bottom >> 24) & 0xff; 310 addr[4] = top & 0xff; 311 addr[5] = (top >> 8) & 0xff; 312 313 if (is_valid_ether_addr(addr)) { 314 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); 315 return; 316 } 317 } 318 319 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); 320 eth_hw_addr_random(bp->dev); 321 } 322 323 static int macb_mdio_wait_for_idle(struct macb *bp) 324 { 325 u32 val; 326 327 return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE), 328 1, MACB_MDIO_TIMEOUT); 329 } 330 331 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 332 { 333 struct macb *bp = bus->priv; 334 int status; 335 336 status = pm_runtime_get_sync(&bp->pdev->dev); 337 if (status < 0) 338 goto mdio_pm_exit; 339 340 status = macb_mdio_wait_for_idle(bp); 341 if (status < 0) 342 goto mdio_read_exit; 343 344 if (regnum & MII_ADDR_C45) { 345 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) 346 | MACB_BF(RW, MACB_MAN_C45_ADDR) 347 | MACB_BF(PHYA, mii_id) 348 | MACB_BF(REGA, (regnum >> 16) & 0x1F) 349 | MACB_BF(DATA, regnum & 0xFFFF) 350 | MACB_BF(CODE, MACB_MAN_C45_CODE))); 351 352 status = macb_mdio_wait_for_idle(bp); 353 if (status < 0) 354 goto mdio_read_exit; 355 356 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) 357 | MACB_BF(RW, MACB_MAN_C45_READ) 358 | MACB_BF(PHYA, mii_id) 359 | MACB_BF(REGA, (regnum >> 16) & 0x1F) 360 | MACB_BF(CODE, MACB_MAN_C45_CODE))); 361 } else { 362 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) 363 | MACB_BF(RW, MACB_MAN_C22_READ) 364 | MACB_BF(PHYA, mii_id) 365 | MACB_BF(REGA, regnum) 366 | MACB_BF(CODE, MACB_MAN_C22_CODE))); 367 } 368 369 status = 
macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));

mdio_read_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0)
		goto mdio_pm_exit;

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

	if (regnum & MII_ADDR_C45) {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_ADDR)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(DATA, regnum & 0xFFFF)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)));

		status = macb_mdio_wait_for_idle(bp);
		if (status < 0)
			goto mdio_write_exit;

		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_WRITE)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)
			    | MACB_BF(DATA, value)));
	} else {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
				| MACB_BF(RW, MACB_MAN_C22_WRITE)
				| MACB_BF(PHYA, mii_id)
				| MACB_BF(REGA, regnum)
				| MACB_BF(CODE, MACB_MAN_C22_CODE)
				| MACB_BF(DATA, value)));
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

mdio_write_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

static void macb_init_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH,
				     upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH,
				     upper_32_bits(queue->tx_ring_dma));
#endif
	}
}

/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk: Pointer to the clock to change
 * @speed: New link speed (SPEED_10, SPEED_100 or SPEED_1000)
 * @dev: Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
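	 * (ferr below is computed in steps of rate / 100000, i.e. 10 ppm,
	 * so a value greater than 5 means the rounded rate is off by more
	 * than 50 ppm.)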
488 */ 489 ferr = abs(rate_rounded - rate); 490 ferr = DIV_ROUND_UP(ferr, rate / 100000); 491 if (ferr > 5) 492 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n", 493 rate); 494 495 if (clk_set_rate(clk, rate_rounded)) 496 netdev_err(dev, "adjusting tx_clk failed.\n"); 497 } 498 499 static void macb_validate(struct phylink_config *config, 500 unsigned long *supported, 501 struct phylink_link_state *state) 502 { 503 struct net_device *ndev = to_net_dev(config->dev); 504 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 505 struct macb *bp = netdev_priv(ndev); 506 507 /* We only support MII, RMII, GMII, RGMII & SGMII. */ 508 if (state->interface != PHY_INTERFACE_MODE_NA && 509 state->interface != PHY_INTERFACE_MODE_MII && 510 state->interface != PHY_INTERFACE_MODE_RMII && 511 state->interface != PHY_INTERFACE_MODE_GMII && 512 state->interface != PHY_INTERFACE_MODE_SGMII && 513 !phy_interface_mode_is_rgmii(state->interface)) { 514 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 515 return; 516 } 517 518 if (!macb_is_gem(bp) && 519 (state->interface == PHY_INTERFACE_MODE_GMII || 520 phy_interface_mode_is_rgmii(state->interface))) { 521 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 522 return; 523 } 524 525 phylink_set_port_modes(mask); 526 phylink_set(mask, Autoneg); 527 phylink_set(mask, Asym_Pause); 528 529 phylink_set(mask, 10baseT_Half); 530 phylink_set(mask, 10baseT_Full); 531 phylink_set(mask, 100baseT_Half); 532 phylink_set(mask, 100baseT_Full); 533 534 if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && 535 (state->interface == PHY_INTERFACE_MODE_NA || 536 state->interface == PHY_INTERFACE_MODE_GMII || 537 state->interface == PHY_INTERFACE_MODE_SGMII || 538 phy_interface_mode_is_rgmii(state->interface))) { 539 phylink_set(mask, 1000baseT_Full); 540 phylink_set(mask, 1000baseX_Full); 541 542 if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF)) 543 phylink_set(mask, 1000baseT_Half); 544 } 545 546 bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); 547 bitmap_and(state->advertising, state->advertising, mask, 548 __ETHTOOL_LINK_MODE_MASK_NBITS); 549 } 550 551 static void macb_mac_pcs_get_state(struct phylink_config *config, 552 struct phylink_link_state *state) 553 { 554 state->link = 0; 555 } 556 557 static void macb_mac_an_restart(struct phylink_config *config) 558 { 559 /* Not supported */ 560 } 561 562 static void macb_mac_config(struct phylink_config *config, unsigned int mode, 563 const struct phylink_link_state *state) 564 { 565 struct net_device *ndev = to_net_dev(config->dev); 566 struct macb *bp = netdev_priv(ndev); 567 unsigned long flags; 568 u32 old_ctrl, ctrl; 569 570 spin_lock_irqsave(&bp->lock, flags); 571 572 old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR); 573 574 if (bp->caps & MACB_CAPS_MACB_IS_EMAC) { 575 if (state->interface == PHY_INTERFACE_MODE_RMII) 576 ctrl |= MACB_BIT(RM9200_RMII); 577 } else { 578 ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL)); 579 580 if (state->interface == PHY_INTERFACE_MODE_SGMII) 581 ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 582 } 583 584 /* Apply the new configuration, if any */ 585 if (old_ctrl ^ ctrl) 586 macb_or_gem_writel(bp, NCFGR, ctrl); 587 588 spin_unlock_irqrestore(&bp->lock, flags); 589 } 590 591 static void macb_mac_link_down(struct phylink_config *config, unsigned int mode, 592 phy_interface_t interface) 593 { 594 struct net_device *ndev = to_net_dev(config->dev); 595 struct macb *bp = netdev_priv(ndev); 596 struct macb_queue *queue; 597 unsigned int q; 598 u32 ctrl; 599 600 if 
(!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) 601 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 602 queue_writel(queue, IDR, 603 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); 604 605 /* Disable Rx and Tx */ 606 ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE)); 607 macb_writel(bp, NCR, ctrl); 608 609 netif_tx_stop_all_queues(ndev); 610 } 611 612 static void macb_mac_link_up(struct phylink_config *config, 613 struct phy_device *phy, 614 unsigned int mode, phy_interface_t interface, 615 int speed, int duplex, 616 bool tx_pause, bool rx_pause) 617 { 618 struct net_device *ndev = to_net_dev(config->dev); 619 struct macb *bp = netdev_priv(ndev); 620 struct macb_queue *queue; 621 unsigned long flags; 622 unsigned int q; 623 u32 ctrl; 624 625 spin_lock_irqsave(&bp->lock, flags); 626 627 ctrl = macb_or_gem_readl(bp, NCFGR); 628 629 ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); 630 631 if (speed == SPEED_100) 632 ctrl |= MACB_BIT(SPD); 633 634 if (duplex) 635 ctrl |= MACB_BIT(FD); 636 637 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { 638 ctrl &= ~(GEM_BIT(GBE) | MACB_BIT(PAE)); 639 640 if (speed == SPEED_1000) 641 ctrl |= GEM_BIT(GBE); 642 643 /* We do not support MLO_PAUSE_RX yet */ 644 if (tx_pause) 645 ctrl |= MACB_BIT(PAE); 646 647 macb_set_tx_clk(bp->tx_clk, speed, ndev); 648 649 /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down 650 * cleared the pipeline and control registers. 651 */ 652 bp->macbgem_ops.mog_init_rings(bp); 653 macb_init_buffers(bp); 654 655 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 656 queue_writel(queue, IER, 657 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); 658 } 659 660 macb_or_gem_writel(bp, NCFGR, ctrl); 661 662 spin_unlock_irqrestore(&bp->lock, flags); 663 664 /* Enable Rx and Tx */ 665 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); 666 667 netif_tx_wake_all_queues(ndev); 668 } 669 670 static const struct phylink_mac_ops macb_phylink_ops = { 671 .validate = macb_validate, 672 .mac_pcs_get_state = macb_mac_pcs_get_state, 673 .mac_an_restart = macb_mac_an_restart, 674 .mac_config = macb_mac_config, 675 .mac_link_down = macb_mac_link_down, 676 .mac_link_up = macb_mac_link_up, 677 }; 678 679 static bool macb_phy_handle_exists(struct device_node *dn) 680 { 681 dn = of_parse_phandle(dn, "phy-handle", 0); 682 of_node_put(dn); 683 return dn != NULL; 684 } 685 686 static int macb_phylink_connect(struct macb *bp) 687 { 688 struct device_node *dn = bp->pdev->dev.of_node; 689 struct net_device *dev = bp->dev; 690 struct phy_device *phydev; 691 int ret; 692 693 if (dn) 694 ret = phylink_of_phy_connect(bp->phylink, dn, 0); 695 696 if (!dn || (ret && !macb_phy_handle_exists(dn))) { 697 phydev = phy_find_first(bp->mii_bus); 698 if (!phydev) { 699 netdev_err(dev, "no PHY found\n"); 700 return -ENXIO; 701 } 702 703 /* attach the mac to the phy */ 704 ret = phylink_connect_phy(bp->phylink, phydev); 705 } 706 707 if (ret) { 708 netdev_err(dev, "Could not attach PHY (%d)\n", ret); 709 return ret; 710 } 711 712 phylink_start(bp->phylink); 713 714 return 0; 715 } 716 717 /* based on au1000_eth. 
c*/ 718 static int macb_mii_probe(struct net_device *dev) 719 { 720 struct macb *bp = netdev_priv(dev); 721 722 bp->phylink_config.dev = &dev->dev; 723 bp->phylink_config.type = PHYLINK_NETDEV; 724 725 bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode, 726 bp->phy_interface, &macb_phylink_ops); 727 if (IS_ERR(bp->phylink)) { 728 netdev_err(dev, "Could not create a phylink instance (%ld)\n", 729 PTR_ERR(bp->phylink)); 730 return PTR_ERR(bp->phylink); 731 } 732 733 return 0; 734 } 735 736 static int macb_mdiobus_register(struct macb *bp) 737 { 738 struct device_node *child, *np = bp->pdev->dev.of_node; 739 740 if (of_phy_is_fixed_link(np)) 741 return mdiobus_register(bp->mii_bus); 742 743 /* Only create the PHY from the device tree if at least one PHY is 744 * described. Otherwise scan the entire MDIO bus. We do this to support 745 * old device tree that did not follow the best practices and did not 746 * describe their network PHYs. 747 */ 748 for_each_available_child_of_node(np, child) 749 if (of_mdiobus_child_is_phy(child)) { 750 /* The loop increments the child refcount, 751 * decrement it before returning. 752 */ 753 of_node_put(child); 754 755 return of_mdiobus_register(bp->mii_bus, np); 756 } 757 758 return mdiobus_register(bp->mii_bus); 759 } 760 761 static int macb_mii_init(struct macb *bp) 762 { 763 int err = -ENXIO; 764 765 /* Enable management port */ 766 macb_writel(bp, NCR, MACB_BIT(MPE)); 767 768 bp->mii_bus = mdiobus_alloc(); 769 if (!bp->mii_bus) { 770 err = -ENOMEM; 771 goto err_out; 772 } 773 774 bp->mii_bus->name = "MACB_mii_bus"; 775 bp->mii_bus->read = &macb_mdio_read; 776 bp->mii_bus->write = &macb_mdio_write; 777 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 778 bp->pdev->name, bp->pdev->id); 779 bp->mii_bus->priv = bp; 780 bp->mii_bus->parent = &bp->pdev->dev; 781 782 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); 783 784 err = macb_mdiobus_register(bp); 785 if (err) 786 goto err_out_free_mdiobus; 787 788 err = macb_mii_probe(bp->dev); 789 if (err) 790 goto err_out_unregister_bus; 791 792 return 0; 793 794 err_out_unregister_bus: 795 mdiobus_unregister(bp->mii_bus); 796 err_out_free_mdiobus: 797 mdiobus_free(bp->mii_bus); 798 err_out: 799 return err; 800 } 801 802 static void macb_update_stats(struct macb *bp) 803 { 804 u32 *p = &bp->hw_stats.macb.rx_pause_frames; 805 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; 806 int offset = MACB_PFR; 807 808 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 809 810 for (; p < end; p++, offset += 4) 811 *p += bp->macb_reg_readl(bp, offset); 812 } 813 814 static int macb_halt_tx(struct macb *bp) 815 { 816 unsigned long halt_time, timeout; 817 u32 status; 818 819 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); 820 821 timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT); 822 do { 823 halt_time = jiffies; 824 status = macb_readl(bp, TSR); 825 if (!(status & MACB_BIT(TGO))) 826 return 0; 827 828 udelay(250); 829 } while (time_before(halt_time, timeout)); 830 831 return -ETIMEDOUT; 832 } 833 834 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) 835 { 836 if (tx_skb->mapping) { 837 if (tx_skb->mapped_as_page) 838 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, 839 tx_skb->size, DMA_TO_DEVICE); 840 else 841 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, 842 tx_skb->size, DMA_TO_DEVICE); 843 tx_skb->mapping = 0; 844 } 845 846 if (tx_skb->skb) { 847 dev_kfree_skb_any(tx_skb->skb); 848 tx_skb->skb = NULL; 849 } 850 } 851 852 static void 
macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) 853 { 854 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 855 struct macb_dma_desc_64 *desc_64; 856 857 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 858 desc_64 = macb_64b_desc(bp, desc); 859 desc_64->addrh = upper_32_bits(addr); 860 /* The low bits of RX address contain the RX_USED bit, clearing 861 * of which allows packet RX. Make sure the high bits are also 862 * visible to HW at that point. 863 */ 864 dma_wmb(); 865 } 866 #endif 867 desc->addr = lower_32_bits(addr); 868 } 869 870 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) 871 { 872 dma_addr_t addr = 0; 873 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 874 struct macb_dma_desc_64 *desc_64; 875 876 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 877 desc_64 = macb_64b_desc(bp, desc); 878 addr = ((u64)(desc_64->addrh) << 32); 879 } 880 #endif 881 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 882 return addr; 883 } 884 885 static void macb_tx_error_task(struct work_struct *work) 886 { 887 struct macb_queue *queue = container_of(work, struct macb_queue, 888 tx_error_task); 889 struct macb *bp = queue->bp; 890 struct macb_tx_skb *tx_skb; 891 struct macb_dma_desc *desc; 892 struct sk_buff *skb; 893 unsigned int tail; 894 unsigned long flags; 895 896 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", 897 (unsigned int)(queue - bp->queues), 898 queue->tx_tail, queue->tx_head); 899 900 /* Prevent the queue IRQ handlers from running: each of them may call 901 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue(). 902 * As explained below, we have to halt the transmission before updating 903 * TBQP registers so we call netif_tx_stop_all_queues() to notify the 904 * network engine about the macb/gem being halted. 905 */ 906 spin_lock_irqsave(&bp->lock, flags); 907 908 /* Make sure nobody is trying to queue up new packets */ 909 netif_tx_stop_all_queues(bp->dev); 910 911 /* Stop transmission now 912 * (in case we have just queued new packets) 913 * macb/gem must be halted to write TBQP register 914 */ 915 if (macb_halt_tx(bp)) 916 /* Just complain for now, reinitializing TX path can be good */ 917 netdev_err(bp->dev, "BUG: halt tx timed out\n"); 918 919 /* Treat frames in TX queue including the ones that caused the error. 920 * Free transmit buffers in upper layer. 921 */ 922 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { 923 u32 ctrl; 924 925 desc = macb_tx_desc(queue, tail); 926 ctrl = desc->ctrl; 927 tx_skb = macb_tx_skb(queue, tail); 928 skb = tx_skb->skb; 929 930 if (ctrl & MACB_BIT(TX_USED)) { 931 /* skb is set for the last buffer of the frame */ 932 while (!skb) { 933 macb_tx_unmap(bp, tx_skb); 934 tail++; 935 tx_skb = macb_tx_skb(queue, tail); 936 skb = tx_skb->skb; 937 } 938 939 /* ctrl still refers to the first buffer descriptor 940 * since it's the only one written back by the hardware 941 */ 942 if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) { 943 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", 944 macb_tx_ring_wrap(bp, tail), 945 skb->data); 946 bp->dev->stats.tx_packets++; 947 queue->stats.tx_packets++; 948 bp->dev->stats.tx_bytes += skb->len; 949 queue->stats.tx_bytes += skb->len; 950 } 951 } else { 952 /* "Buffers exhausted mid-frame" errors may only happen 953 * if the driver is buggy, so complain loudly about 954 * those. Statistics are updated by hardware. 
955 */ 956 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) 957 netdev_err(bp->dev, 958 "BUG: TX buffers exhausted mid-frame\n"); 959 960 desc->ctrl = ctrl | MACB_BIT(TX_USED); 961 } 962 963 macb_tx_unmap(bp, tx_skb); 964 } 965 966 /* Set end of TX queue */ 967 desc = macb_tx_desc(queue, 0); 968 macb_set_addr(bp, desc, 0); 969 desc->ctrl = MACB_BIT(TX_USED); 970 971 /* Make descriptor updates visible to hardware */ 972 wmb(); 973 974 /* Reinitialize the TX desc queue */ 975 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); 976 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 977 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 978 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); 979 #endif 980 /* Make TX ring reflect state of hardware */ 981 queue->tx_head = 0; 982 queue->tx_tail = 0; 983 984 /* Housework before enabling TX IRQ */ 985 macb_writel(bp, TSR, macb_readl(bp, TSR)); 986 queue_writel(queue, IER, MACB_TX_INT_FLAGS); 987 988 /* Now we are ready to start transmission again */ 989 netif_tx_start_all_queues(bp->dev); 990 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 991 992 spin_unlock_irqrestore(&bp->lock, flags); 993 } 994 995 static void macb_tx_interrupt(struct macb_queue *queue) 996 { 997 unsigned int tail; 998 unsigned int head; 999 u32 status; 1000 struct macb *bp = queue->bp; 1001 u16 queue_index = queue - bp->queues; 1002 1003 status = macb_readl(bp, TSR); 1004 macb_writel(bp, TSR, status); 1005 1006 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1007 queue_writel(queue, ISR, MACB_BIT(TCOMP)); 1008 1009 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", 1010 (unsigned long)status); 1011 1012 head = queue->tx_head; 1013 for (tail = queue->tx_tail; tail != head; tail++) { 1014 struct macb_tx_skb *tx_skb; 1015 struct sk_buff *skb; 1016 struct macb_dma_desc *desc; 1017 u32 ctrl; 1018 1019 desc = macb_tx_desc(queue, tail); 1020 1021 /* Make hw descriptor updates visible to CPU */ 1022 rmb(); 1023 1024 ctrl = desc->ctrl; 1025 1026 /* TX_USED bit is only set by hardware on the very first buffer 1027 * descriptor of the transmitted frame. 1028 */ 1029 if (!(ctrl & MACB_BIT(TX_USED))) 1030 break; 1031 1032 /* Process all buffers of the current transmitted frame */ 1033 for (;; tail++) { 1034 tx_skb = macb_tx_skb(queue, tail); 1035 skb = tx_skb->skb; 1036 1037 /* First, update TX stats if needed */ 1038 if (skb) { 1039 if (unlikely(skb_shinfo(skb)->tx_flags & 1040 SKBTX_HW_TSTAMP) && 1041 gem_ptp_do_txstamp(queue, skb, desc) == 0) { 1042 /* skb now belongs to timestamp buffer 1043 * and will be removed later 1044 */ 1045 tx_skb->skb = NULL; 1046 } 1047 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", 1048 macb_tx_ring_wrap(bp, tail), 1049 skb->data); 1050 bp->dev->stats.tx_packets++; 1051 queue->stats.tx_packets++; 1052 bp->dev->stats.tx_bytes += skb->len; 1053 queue->stats.tx_bytes += skb->len; 1054 } 1055 1056 /* Now we can safely release resources */ 1057 macb_tx_unmap(bp, tx_skb); 1058 1059 /* skb is set only for the last buffer of the frame. 1060 * WARNING: at this point skb has been freed by 1061 * macb_tx_unmap(). 
1062 */ 1063 if (skb) 1064 break; 1065 } 1066 } 1067 1068 queue->tx_tail = tail; 1069 if (__netif_subqueue_stopped(bp->dev, queue_index) && 1070 CIRC_CNT(queue->tx_head, queue->tx_tail, 1071 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) 1072 netif_wake_subqueue(bp->dev, queue_index); 1073 } 1074 1075 static void gem_rx_refill(struct macb_queue *queue) 1076 { 1077 unsigned int entry; 1078 struct sk_buff *skb; 1079 dma_addr_t paddr; 1080 struct macb *bp = queue->bp; 1081 struct macb_dma_desc *desc; 1082 1083 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail, 1084 bp->rx_ring_size) > 0) { 1085 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); 1086 1087 /* Make hw descriptor updates visible to CPU */ 1088 rmb(); 1089 1090 queue->rx_prepared_head++; 1091 desc = macb_rx_desc(queue, entry); 1092 1093 if (!queue->rx_skbuff[entry]) { 1094 /* allocate sk_buff for this free entry in ring */ 1095 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); 1096 if (unlikely(!skb)) { 1097 netdev_err(bp->dev, 1098 "Unable to allocate sk_buff\n"); 1099 break; 1100 } 1101 1102 /* now fill corresponding descriptor entry */ 1103 paddr = dma_map_single(&bp->pdev->dev, skb->data, 1104 bp->rx_buffer_size, 1105 DMA_FROM_DEVICE); 1106 if (dma_mapping_error(&bp->pdev->dev, paddr)) { 1107 dev_kfree_skb(skb); 1108 break; 1109 } 1110 1111 queue->rx_skbuff[entry] = skb; 1112 1113 if (entry == bp->rx_ring_size - 1) 1114 paddr |= MACB_BIT(RX_WRAP); 1115 desc->ctrl = 0; 1116 /* Setting addr clears RX_USED and allows reception, 1117 * make sure ctrl is cleared first to avoid a race. 1118 */ 1119 dma_wmb(); 1120 macb_set_addr(bp, desc, paddr); 1121 1122 /* properly align Ethernet header */ 1123 skb_reserve(skb, NET_IP_ALIGN); 1124 } else { 1125 desc->ctrl = 0; 1126 dma_wmb(); 1127 desc->addr &= ~MACB_BIT(RX_USED); 1128 } 1129 } 1130 1131 /* Make descriptor updates visible to hardware */ 1132 wmb(); 1133 1134 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", 1135 queue, queue->rx_prepared_head, queue->rx_tail); 1136 } 1137 1138 /* Mark DMA descriptors from begin up to and not including end as unused */ 1139 static void discard_partial_frame(struct macb_queue *queue, unsigned int begin, 1140 unsigned int end) 1141 { 1142 unsigned int frag; 1143 1144 for (frag = begin; frag != end; frag++) { 1145 struct macb_dma_desc *desc = macb_rx_desc(queue, frag); 1146 1147 desc->addr &= ~MACB_BIT(RX_USED); 1148 } 1149 1150 /* Make descriptor updates visible to hardware */ 1151 wmb(); 1152 1153 /* When this happens, the hardware stats registers for 1154 * whatever caused this is updated, so we don't have to record 1155 * anything. 1156 */ 1157 } 1158 1159 static int gem_rx(struct macb_queue *queue, struct napi_struct *napi, 1160 int budget) 1161 { 1162 struct macb *bp = queue->bp; 1163 unsigned int len; 1164 unsigned int entry; 1165 struct sk_buff *skb; 1166 struct macb_dma_desc *desc; 1167 int count = 0; 1168 1169 while (count < budget) { 1170 u32 ctrl; 1171 dma_addr_t addr; 1172 bool rxused; 1173 1174 entry = macb_rx_ring_wrap(bp, queue->rx_tail); 1175 desc = macb_rx_desc(queue, entry); 1176 1177 /* Make hw descriptor updates visible to CPU */ 1178 rmb(); 1179 1180 rxused = (desc->addr & MACB_BIT(RX_USED)) ? 
true : false; 1181 addr = macb_get_addr(bp, desc); 1182 1183 if (!rxused) 1184 break; 1185 1186 /* Ensure ctrl is at least as up-to-date as rxused */ 1187 dma_rmb(); 1188 1189 ctrl = desc->ctrl; 1190 1191 queue->rx_tail++; 1192 count++; 1193 1194 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { 1195 netdev_err(bp->dev, 1196 "not whole frame pointed by descriptor\n"); 1197 bp->dev->stats.rx_dropped++; 1198 queue->stats.rx_dropped++; 1199 break; 1200 } 1201 skb = queue->rx_skbuff[entry]; 1202 if (unlikely(!skb)) { 1203 netdev_err(bp->dev, 1204 "inconsistent Rx descriptor chain\n"); 1205 bp->dev->stats.rx_dropped++; 1206 queue->stats.rx_dropped++; 1207 break; 1208 } 1209 /* now everything is ready for receiving packet */ 1210 queue->rx_skbuff[entry] = NULL; 1211 len = ctrl & bp->rx_frm_len_mask; 1212 1213 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); 1214 1215 skb_put(skb, len); 1216 dma_unmap_single(&bp->pdev->dev, addr, 1217 bp->rx_buffer_size, DMA_FROM_DEVICE); 1218 1219 skb->protocol = eth_type_trans(skb, bp->dev); 1220 skb_checksum_none_assert(skb); 1221 if (bp->dev->features & NETIF_F_RXCSUM && 1222 !(bp->dev->flags & IFF_PROMISC) && 1223 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK) 1224 skb->ip_summed = CHECKSUM_UNNECESSARY; 1225 1226 bp->dev->stats.rx_packets++; 1227 queue->stats.rx_packets++; 1228 bp->dev->stats.rx_bytes += skb->len; 1229 queue->stats.rx_bytes += skb->len; 1230 1231 gem_ptp_do_rxstamp(bp, skb, desc); 1232 1233 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 1234 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 1235 skb->len, skb->csum); 1236 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, 1237 skb_mac_header(skb), 16, true); 1238 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, 1239 skb->data, 32, true); 1240 #endif 1241 1242 napi_gro_receive(napi, skb); 1243 } 1244 1245 gem_rx_refill(queue); 1246 1247 return count; 1248 } 1249 1250 static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi, 1251 unsigned int first_frag, unsigned int last_frag) 1252 { 1253 unsigned int len; 1254 unsigned int frag; 1255 unsigned int offset; 1256 struct sk_buff *skb; 1257 struct macb_dma_desc *desc; 1258 struct macb *bp = queue->bp; 1259 1260 desc = macb_rx_desc(queue, last_frag); 1261 len = desc->ctrl & bp->rx_frm_len_mask; 1262 1263 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", 1264 macb_rx_ring_wrap(bp, first_frag), 1265 macb_rx_ring_wrap(bp, last_frag), len); 1266 1267 /* The ethernet header starts NET_IP_ALIGN bytes into the 1268 * first buffer. Since the header is 14 bytes, this makes the 1269 * payload word-aligned. 1270 * 1271 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy 1272 * the two padding bytes into the skb so that we avoid hitting 1273 * the slowpath in memcpy(), and pull them off afterwards. 
1274 */ 1275 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); 1276 if (!skb) { 1277 bp->dev->stats.rx_dropped++; 1278 for (frag = first_frag; ; frag++) { 1279 desc = macb_rx_desc(queue, frag); 1280 desc->addr &= ~MACB_BIT(RX_USED); 1281 if (frag == last_frag) 1282 break; 1283 } 1284 1285 /* Make descriptor updates visible to hardware */ 1286 wmb(); 1287 1288 return 1; 1289 } 1290 1291 offset = 0; 1292 len += NET_IP_ALIGN; 1293 skb_checksum_none_assert(skb); 1294 skb_put(skb, len); 1295 1296 for (frag = first_frag; ; frag++) { 1297 unsigned int frag_len = bp->rx_buffer_size; 1298 1299 if (offset + frag_len > len) { 1300 if (unlikely(frag != last_frag)) { 1301 dev_kfree_skb_any(skb); 1302 return -1; 1303 } 1304 frag_len = len - offset; 1305 } 1306 skb_copy_to_linear_data_offset(skb, offset, 1307 macb_rx_buffer(queue, frag), 1308 frag_len); 1309 offset += bp->rx_buffer_size; 1310 desc = macb_rx_desc(queue, frag); 1311 desc->addr &= ~MACB_BIT(RX_USED); 1312 1313 if (frag == last_frag) 1314 break; 1315 } 1316 1317 /* Make descriptor updates visible to hardware */ 1318 wmb(); 1319 1320 __skb_pull(skb, NET_IP_ALIGN); 1321 skb->protocol = eth_type_trans(skb, bp->dev); 1322 1323 bp->dev->stats.rx_packets++; 1324 bp->dev->stats.rx_bytes += skb->len; 1325 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 1326 skb->len, skb->csum); 1327 napi_gro_receive(napi, skb); 1328 1329 return 0; 1330 } 1331 1332 static inline void macb_init_rx_ring(struct macb_queue *queue) 1333 { 1334 struct macb *bp = queue->bp; 1335 dma_addr_t addr; 1336 struct macb_dma_desc *desc = NULL; 1337 int i; 1338 1339 addr = queue->rx_buffers_dma; 1340 for (i = 0; i < bp->rx_ring_size; i++) { 1341 desc = macb_rx_desc(queue, i); 1342 macb_set_addr(bp, desc, addr); 1343 desc->ctrl = 0; 1344 addr += bp->rx_buffer_size; 1345 } 1346 desc->addr |= MACB_BIT(RX_WRAP); 1347 queue->rx_tail = 0; 1348 } 1349 1350 static int macb_rx(struct macb_queue *queue, struct napi_struct *napi, 1351 int budget) 1352 { 1353 struct macb *bp = queue->bp; 1354 bool reset_rx_queue = false; 1355 int received = 0; 1356 unsigned int tail; 1357 int first_frag = -1; 1358 1359 for (tail = queue->rx_tail; budget > 0; tail++) { 1360 struct macb_dma_desc *desc = macb_rx_desc(queue, tail); 1361 u32 ctrl; 1362 1363 /* Make hw descriptor updates visible to CPU */ 1364 rmb(); 1365 1366 if (!(desc->addr & MACB_BIT(RX_USED))) 1367 break; 1368 1369 /* Ensure ctrl is at least as up-to-date as addr */ 1370 dma_rmb(); 1371 1372 ctrl = desc->ctrl; 1373 1374 if (ctrl & MACB_BIT(RX_SOF)) { 1375 if (first_frag != -1) 1376 discard_partial_frame(queue, first_frag, tail); 1377 first_frag = tail; 1378 } 1379 1380 if (ctrl & MACB_BIT(RX_EOF)) { 1381 int dropped; 1382 1383 if (unlikely(first_frag == -1)) { 1384 reset_rx_queue = true; 1385 continue; 1386 } 1387 1388 dropped = macb_rx_frame(queue, napi, first_frag, tail); 1389 first_frag = -1; 1390 if (unlikely(dropped < 0)) { 1391 reset_rx_queue = true; 1392 continue; 1393 } 1394 if (!dropped) { 1395 received++; 1396 budget--; 1397 } 1398 } 1399 } 1400 1401 if (unlikely(reset_rx_queue)) { 1402 unsigned long flags; 1403 u32 ctrl; 1404 1405 netdev_err(bp->dev, "RX queue corruption: reset it\n"); 1406 1407 spin_lock_irqsave(&bp->lock, flags); 1408 1409 ctrl = macb_readl(bp, NCR); 1410 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1411 1412 macb_init_rx_ring(queue); 1413 queue_writel(queue, RBQP, queue->rx_ring_dma); 1414 1415 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1416 1417 spin_unlock_irqrestore(&bp->lock, flags); 1418 
return received; 1419 } 1420 1421 if (first_frag != -1) 1422 queue->rx_tail = first_frag; 1423 else 1424 queue->rx_tail = tail; 1425 1426 return received; 1427 } 1428 1429 static int macb_poll(struct napi_struct *napi, int budget) 1430 { 1431 struct macb_queue *queue = container_of(napi, struct macb_queue, napi); 1432 struct macb *bp = queue->bp; 1433 int work_done; 1434 u32 status; 1435 1436 status = macb_readl(bp, RSR); 1437 macb_writel(bp, RSR, status); 1438 1439 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", 1440 (unsigned long)status, budget); 1441 1442 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); 1443 if (work_done < budget) { 1444 napi_complete_done(napi, work_done); 1445 1446 /* Packets received while interrupts were disabled */ 1447 status = macb_readl(bp, RSR); 1448 if (status) { 1449 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1450 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1451 napi_reschedule(napi); 1452 } else { 1453 queue_writel(queue, IER, bp->rx_intr_mask); 1454 } 1455 } 1456 1457 /* TODO: Handle errors */ 1458 1459 return work_done; 1460 } 1461 1462 static void macb_hresp_error_task(unsigned long data) 1463 { 1464 struct macb *bp = (struct macb *)data; 1465 struct net_device *dev = bp->dev; 1466 struct macb_queue *queue = bp->queues; 1467 unsigned int q; 1468 u32 ctrl; 1469 1470 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1471 queue_writel(queue, IDR, bp->rx_intr_mask | 1472 MACB_TX_INT_FLAGS | 1473 MACB_BIT(HRESP)); 1474 } 1475 ctrl = macb_readl(bp, NCR); 1476 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); 1477 macb_writel(bp, NCR, ctrl); 1478 1479 netif_tx_stop_all_queues(dev); 1480 netif_carrier_off(dev); 1481 1482 bp->macbgem_ops.mog_init_rings(bp); 1483 1484 /* Initialize TX and RX buffers */ 1485 macb_init_buffers(bp); 1486 1487 /* Enable interrupts */ 1488 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 1489 queue_writel(queue, IER, 1490 bp->rx_intr_mask | 1491 MACB_TX_INT_FLAGS | 1492 MACB_BIT(HRESP)); 1493 1494 ctrl |= MACB_BIT(RE) | MACB_BIT(TE); 1495 macb_writel(bp, NCR, ctrl); 1496 1497 netif_carrier_on(dev); 1498 netif_tx_start_all_queues(dev); 1499 } 1500 1501 static void macb_tx_restart(struct macb_queue *queue) 1502 { 1503 unsigned int head = queue->tx_head; 1504 unsigned int tail = queue->tx_tail; 1505 struct macb *bp = queue->bp; 1506 1507 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1508 queue_writel(queue, ISR, MACB_BIT(TXUBR)); 1509 1510 if (head == tail) 1511 return; 1512 1513 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 1514 } 1515 1516 static irqreturn_t macb_interrupt(int irq, void *dev_id) 1517 { 1518 struct macb_queue *queue = dev_id; 1519 struct macb *bp = queue->bp; 1520 struct net_device *dev = bp->dev; 1521 u32 status, ctrl; 1522 1523 status = queue_readl(queue, ISR); 1524 1525 if (unlikely(!status)) 1526 return IRQ_NONE; 1527 1528 spin_lock(&bp->lock); 1529 1530 while (status) { 1531 /* close possible race with dev_close */ 1532 if (unlikely(!netif_running(dev))) { 1533 queue_writel(queue, IDR, -1); 1534 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1535 queue_writel(queue, ISR, -1); 1536 break; 1537 } 1538 1539 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", 1540 (unsigned int)(queue - bp->queues), 1541 (unsigned long)status); 1542 1543 if (status & bp->rx_intr_mask) { 1544 /* There's no point taking any more interrupts 1545 * until we have processed the buffers. 
The 1546 * scheduling call may fail if the poll routine 1547 * is already scheduled, so disable interrupts 1548 * now. 1549 */ 1550 queue_writel(queue, IDR, bp->rx_intr_mask); 1551 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1552 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1553 1554 if (napi_schedule_prep(&queue->napi)) { 1555 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); 1556 __napi_schedule(&queue->napi); 1557 } 1558 } 1559 1560 if (unlikely(status & (MACB_TX_ERR_FLAGS))) { 1561 queue_writel(queue, IDR, MACB_TX_INT_FLAGS); 1562 schedule_work(&queue->tx_error_task); 1563 1564 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1565 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS); 1566 1567 break; 1568 } 1569 1570 if (status & MACB_BIT(TCOMP)) 1571 macb_tx_interrupt(queue); 1572 1573 if (status & MACB_BIT(TXUBR)) 1574 macb_tx_restart(queue); 1575 1576 /* Link change detection isn't possible with RMII, so we'll 1577 * add that if/when we get our hands on a full-blown MII PHY. 1578 */ 1579 1580 /* There is a hardware issue under heavy load where DMA can 1581 * stop, this causes endless "used buffer descriptor read" 1582 * interrupts but it can be cleared by re-enabling RX. See 1583 * the at91rm9200 manual, section 41.3.1 or the Zynq manual 1584 * section 16.7.4 for details. RXUBR is only enabled for 1585 * these two versions. 1586 */ 1587 if (status & MACB_BIT(RXUBR)) { 1588 ctrl = macb_readl(bp, NCR); 1589 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1590 wmb(); 1591 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1592 1593 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1594 queue_writel(queue, ISR, MACB_BIT(RXUBR)); 1595 } 1596 1597 if (status & MACB_BIT(ISR_ROVR)) { 1598 /* We missed at least one packet */ 1599 if (macb_is_gem(bp)) 1600 bp->hw_stats.gem.rx_overruns++; 1601 else 1602 bp->hw_stats.macb.rx_overruns++; 1603 1604 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1605 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); 1606 } 1607 1608 if (status & MACB_BIT(HRESP)) { 1609 tasklet_schedule(&bp->hresp_err_tasklet); 1610 netdev_err(dev, "DMA bus error: HRESP not OK\n"); 1611 1612 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1613 queue_writel(queue, ISR, MACB_BIT(HRESP)); 1614 } 1615 status = queue_readl(queue, ISR); 1616 } 1617 1618 spin_unlock(&bp->lock); 1619 1620 return IRQ_HANDLED; 1621 } 1622 1623 #ifdef CONFIG_NET_POLL_CONTROLLER 1624 /* Polling receive - used by netconsole and other diagnostic tools 1625 * to allow network i/o with interrupts disabled. 
1626 */ 1627 static void macb_poll_controller(struct net_device *dev) 1628 { 1629 struct macb *bp = netdev_priv(dev); 1630 struct macb_queue *queue; 1631 unsigned long flags; 1632 unsigned int q; 1633 1634 local_irq_save(flags); 1635 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 1636 macb_interrupt(dev->irq, queue); 1637 local_irq_restore(flags); 1638 } 1639 #endif 1640 1641 static unsigned int macb_tx_map(struct macb *bp, 1642 struct macb_queue *queue, 1643 struct sk_buff *skb, 1644 unsigned int hdrlen) 1645 { 1646 dma_addr_t mapping; 1647 unsigned int len, entry, i, tx_head = queue->tx_head; 1648 struct macb_tx_skb *tx_skb = NULL; 1649 struct macb_dma_desc *desc; 1650 unsigned int offset, size, count = 0; 1651 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; 1652 unsigned int eof = 1, mss_mfs = 0; 1653 u32 ctrl, lso_ctrl = 0, seq_ctrl = 0; 1654 1655 /* LSO */ 1656 if (skb_shinfo(skb)->gso_size != 0) { 1657 if (ip_hdr(skb)->protocol == IPPROTO_UDP) 1658 /* UDP - UFO */ 1659 lso_ctrl = MACB_LSO_UFO_ENABLE; 1660 else 1661 /* TCP - TSO */ 1662 lso_ctrl = MACB_LSO_TSO_ENABLE; 1663 } 1664 1665 /* First, map non-paged data */ 1666 len = skb_headlen(skb); 1667 1668 /* first buffer length */ 1669 size = hdrlen; 1670 1671 offset = 0; 1672 while (len) { 1673 entry = macb_tx_ring_wrap(bp, tx_head); 1674 tx_skb = &queue->tx_skb[entry]; 1675 1676 mapping = dma_map_single(&bp->pdev->dev, 1677 skb->data + offset, 1678 size, DMA_TO_DEVICE); 1679 if (dma_mapping_error(&bp->pdev->dev, mapping)) 1680 goto dma_error; 1681 1682 /* Save info to properly release resources */ 1683 tx_skb->skb = NULL; 1684 tx_skb->mapping = mapping; 1685 tx_skb->size = size; 1686 tx_skb->mapped_as_page = false; 1687 1688 len -= size; 1689 offset += size; 1690 count++; 1691 tx_head++; 1692 1693 size = min(len, bp->max_tx_length); 1694 } 1695 1696 /* Then, map paged data from fragments */ 1697 for (f = 0; f < nr_frags; f++) { 1698 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 1699 1700 len = skb_frag_size(frag); 1701 offset = 0; 1702 while (len) { 1703 size = min(len, bp->max_tx_length); 1704 entry = macb_tx_ring_wrap(bp, tx_head); 1705 tx_skb = &queue->tx_skb[entry]; 1706 1707 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 1708 offset, size, DMA_TO_DEVICE); 1709 if (dma_mapping_error(&bp->pdev->dev, mapping)) 1710 goto dma_error; 1711 1712 /* Save info to properly release resources */ 1713 tx_skb->skb = NULL; 1714 tx_skb->mapping = mapping; 1715 tx_skb->size = size; 1716 tx_skb->mapped_as_page = true; 1717 1718 len -= size; 1719 offset += size; 1720 count++; 1721 tx_head++; 1722 } 1723 } 1724 1725 /* Should never happen */ 1726 if (unlikely(!tx_skb)) { 1727 netdev_err(bp->dev, "BUG! 
empty skb!\n"); 1728 return 0; 1729 } 1730 1731 /* This is the last buffer of the frame: save socket buffer */ 1732 tx_skb->skb = skb; 1733 1734 /* Update TX ring: update buffer descriptors in reverse order 1735 * to avoid race condition 1736 */ 1737 1738 /* Set 'TX_USED' bit in buffer descriptor at tx_head position 1739 * to set the end of TX queue 1740 */ 1741 i = tx_head; 1742 entry = macb_tx_ring_wrap(bp, i); 1743 ctrl = MACB_BIT(TX_USED); 1744 desc = macb_tx_desc(queue, entry); 1745 desc->ctrl = ctrl; 1746 1747 if (lso_ctrl) { 1748 if (lso_ctrl == MACB_LSO_UFO_ENABLE) 1749 /* include header and FCS in value given to h/w */ 1750 mss_mfs = skb_shinfo(skb)->gso_size + 1751 skb_transport_offset(skb) + 1752 ETH_FCS_LEN; 1753 else /* TSO */ { 1754 mss_mfs = skb_shinfo(skb)->gso_size; 1755 /* TCP Sequence Number Source Select 1756 * can be set only for TSO 1757 */ 1758 seq_ctrl = 0; 1759 } 1760 } 1761 1762 do { 1763 i--; 1764 entry = macb_tx_ring_wrap(bp, i); 1765 tx_skb = &queue->tx_skb[entry]; 1766 desc = macb_tx_desc(queue, entry); 1767 1768 ctrl = (u32)tx_skb->size; 1769 if (eof) { 1770 ctrl |= MACB_BIT(TX_LAST); 1771 eof = 0; 1772 } 1773 if (unlikely(entry == (bp->tx_ring_size - 1))) 1774 ctrl |= MACB_BIT(TX_WRAP); 1775 1776 /* First descriptor is header descriptor */ 1777 if (i == queue->tx_head) { 1778 ctrl |= MACB_BF(TX_LSO, lso_ctrl); 1779 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl); 1780 if ((bp->dev->features & NETIF_F_HW_CSUM) && 1781 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl) 1782 ctrl |= MACB_BIT(TX_NOCRC); 1783 } else 1784 /* Only set MSS/MFS on payload descriptors 1785 * (second or later descriptor) 1786 */ 1787 ctrl |= MACB_BF(MSS_MFS, mss_mfs); 1788 1789 /* Set TX buffer descriptor */ 1790 macb_set_addr(bp, desc, tx_skb->mapping); 1791 /* desc->addr must be visible to hardware before clearing 1792 * 'TX_USED' bit in desc->ctrl. 1793 */ 1794 wmb(); 1795 desc->ctrl = ctrl; 1796 } while (i != queue->tx_head); 1797 1798 queue->tx_head = tx_head; 1799 1800 return count; 1801 1802 dma_error: 1803 netdev_err(bp->dev, "TX DMA map failed\n"); 1804 1805 for (i = queue->tx_head; i != tx_head; i++) { 1806 tx_skb = macb_tx_skb(queue, i); 1807 1808 macb_tx_unmap(bp, tx_skb); 1809 } 1810 1811 return 0; 1812 } 1813 1814 static netdev_features_t macb_features_check(struct sk_buff *skb, 1815 struct net_device *dev, 1816 netdev_features_t features) 1817 { 1818 unsigned int nr_frags, f; 1819 unsigned int hdrlen; 1820 1821 /* Validate LSO compatibility */ 1822 1823 /* there is only one buffer or protocol is not UDP */ 1824 if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP)) 1825 return features; 1826 1827 /* length of header */ 1828 hdrlen = skb_transport_offset(skb); 1829 1830 /* For UFO only: 1831 * When software supplies two or more payload buffers all payload buffers 1832 * apart from the last must be a multiple of 8 bytes in size. 
 */
	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
		return features & ~MACB_NETIF_LSO;

	nr_frags = skb_shinfo(skb)->nr_frags;
	/* No need to check last fragment */
	nr_frags--;
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
			return features & ~MACB_NETIF_LSO;
	}
	return features;
}

static inline int macb_clear_csum(struct sk_buff *skb)
{
	/* no change for packets without checksum offloading */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* make sure we can modify the header */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	/* initialize checksum field
	 * This is required - at least for Zynq, which otherwise calculates
	 * wrong UDP header checksums for UDP packets with UDP data len <= 2
	 */
	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
	return 0;
}

static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
{
	bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
	int padlen = ETH_ZLEN - (*skb)->len;
	int headroom = skb_headroom(*skb);
	int tailroom = skb_tailroom(*skb);
	struct sk_buff *nskb;
	u32 fcs;

	if (!(ndev->features & NETIF_F_HW_CSUM) ||
	    !((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
	    skb_shinfo(*skb)->gso_size)	/* Not available for GSO */
		return 0;

	if (padlen <= 0) {
		/* FCS could be appended to tailroom. */
		if (tailroom >= ETH_FCS_LEN)
			goto add_fcs;
		/* FCS could be appended by moving data to headroom. */
		else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
			padlen = 0;
		/* No room for FCS, need to reallocate skb. */
		else
			padlen = ETH_FCS_LEN;
	} else {
		/* Add room for FCS.
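		 * The FCS bytes themselves are computed and appended at the
		 * add_fcs label below, after any required zero padding.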
*/ 1893 padlen += ETH_FCS_LEN; 1894 } 1895 1896 if (!cloned && headroom + tailroom >= padlen) { 1897 (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); 1898 skb_set_tail_pointer(*skb, (*skb)->len); 1899 } else { 1900 nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); 1901 if (!nskb) 1902 return -ENOMEM; 1903 1904 dev_consume_skb_any(*skb); 1905 *skb = nskb; 1906 } 1907 1908 if (padlen > ETH_FCS_LEN) 1909 skb_put_zero(*skb, padlen - ETH_FCS_LEN); 1910 1911 add_fcs: 1912 /* set FCS to packet */ 1913 fcs = crc32_le(~0, (*skb)->data, (*skb)->len); 1914 fcs = ~fcs; 1915 1916 skb_put_u8(*skb, fcs & 0xff); 1917 skb_put_u8(*skb, (fcs >> 8) & 0xff); 1918 skb_put_u8(*skb, (fcs >> 16) & 0xff); 1919 skb_put_u8(*skb, (fcs >> 24) & 0xff); 1920 1921 return 0; 1922 } 1923 1924 static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) 1925 { 1926 u16 queue_index = skb_get_queue_mapping(skb); 1927 struct macb *bp = netdev_priv(dev); 1928 struct macb_queue *queue = &bp->queues[queue_index]; 1929 unsigned long flags; 1930 unsigned int desc_cnt, nr_frags, frag_size, f; 1931 unsigned int hdrlen; 1932 bool is_lso, is_udp = 0; 1933 netdev_tx_t ret = NETDEV_TX_OK; 1934 1935 if (macb_clear_csum(skb)) { 1936 dev_kfree_skb_any(skb); 1937 return ret; 1938 } 1939 1940 if (macb_pad_and_fcs(&skb, dev)) { 1941 dev_kfree_skb_any(skb); 1942 return ret; 1943 } 1944 1945 is_lso = (skb_shinfo(skb)->gso_size != 0); 1946 1947 if (is_lso) { 1948 is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP); 1949 1950 /* length of headers */ 1951 if (is_udp) 1952 /* only queue eth + ip headers separately for UDP */ 1953 hdrlen = skb_transport_offset(skb); 1954 else 1955 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); 1956 if (skb_headlen(skb) < hdrlen) { 1957 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); 1958 /* if this is required, would need to copy to single buffer */ 1959 return NETDEV_TX_BUSY; 1960 } 1961 } else 1962 hdrlen = min(skb_headlen(skb), bp->max_tx_length); 1963 1964 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 1965 netdev_vdbg(bp->dev, 1966 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", 1967 queue_index, skb->len, skb->head, skb->data, 1968 skb_tail_pointer(skb), skb_end_pointer(skb)); 1969 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, 1970 skb->data, 16, true); 1971 #endif 1972 1973 /* Count how many TX buffer descriptors are needed to send this 1974 * socket buffer: skb fragments of jumbo frames may need to be 1975 * split into many buffer descriptors. 1976 */ 1977 if (is_lso && (skb_headlen(skb) > hdrlen)) 1978 /* extra header descriptor if also payload in first buffer */ 1979 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; 1980 else 1981 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); 1982 nr_frags = skb_shinfo(skb)->nr_frags; 1983 for (f = 0; f < nr_frags; f++) { 1984 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); 1985 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); 1986 } 1987 1988 spin_lock_irqsave(&bp->lock, flags); 1989 1990 /* This is a hard error, log it. 
*/ 1991 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, 1992 bp->tx_ring_size) < desc_cnt) { 1993 netif_stop_subqueue(dev, queue_index); 1994 spin_unlock_irqrestore(&bp->lock, flags); 1995 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", 1996 queue->tx_head, queue->tx_tail); 1997 return NETDEV_TX_BUSY; 1998 } 1999 2000 /* Map socket buffer for DMA transfer */ 2001 if (!macb_tx_map(bp, queue, skb, hdrlen)) { 2002 dev_kfree_skb_any(skb); 2003 goto unlock; 2004 } 2005 2006 /* Make newly initialized descriptor visible to hardware */ 2007 wmb(); 2008 skb_tx_timestamp(skb); 2009 2010 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 2011 2012 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) 2013 netif_stop_subqueue(dev, queue_index); 2014 2015 unlock: 2016 spin_unlock_irqrestore(&bp->lock, flags); 2017 2018 return ret; 2019 } 2020 2021 static void macb_init_rx_buffer_size(struct macb *bp, size_t size) 2022 { 2023 if (!macb_is_gem(bp)) { 2024 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; 2025 } else { 2026 bp->rx_buffer_size = size; 2027 2028 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { 2029 netdev_dbg(bp->dev, 2030 "RX buffer must be multiple of %d bytes, expanding\n", 2031 RX_BUFFER_MULTIPLE); 2032 bp->rx_buffer_size = 2033 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); 2034 } 2035 } 2036 2037 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", 2038 bp->dev->mtu, bp->rx_buffer_size); 2039 } 2040 2041 static void gem_free_rx_buffers(struct macb *bp) 2042 { 2043 struct sk_buff *skb; 2044 struct macb_dma_desc *desc; 2045 struct macb_queue *queue; 2046 dma_addr_t addr; 2047 unsigned int q; 2048 int i; 2049 2050 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2051 if (!queue->rx_skbuff) 2052 continue; 2053 2054 for (i = 0; i < bp->rx_ring_size; i++) { 2055 skb = queue->rx_skbuff[i]; 2056 2057 if (!skb) 2058 continue; 2059 2060 desc = macb_rx_desc(queue, i); 2061 addr = macb_get_addr(bp, desc); 2062 2063 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, 2064 DMA_FROM_DEVICE); 2065 dev_kfree_skb_any(skb); 2066 skb = NULL; 2067 } 2068 2069 kfree(queue->rx_skbuff); 2070 queue->rx_skbuff = NULL; 2071 } 2072 } 2073 2074 static void macb_free_rx_buffers(struct macb *bp) 2075 { 2076 struct macb_queue *queue = &bp->queues[0]; 2077 2078 if (queue->rx_buffers) { 2079 dma_free_coherent(&bp->pdev->dev, 2080 bp->rx_ring_size * bp->rx_buffer_size, 2081 queue->rx_buffers, queue->rx_buffers_dma); 2082 queue->rx_buffers = NULL; 2083 } 2084 } 2085 2086 static void macb_free_consistent(struct macb *bp) 2087 { 2088 struct macb_queue *queue; 2089 unsigned int q; 2090 int size; 2091 2092 bp->macbgem_ops.mog_free_rx_buffers(bp); 2093 2094 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2095 kfree(queue->tx_skb); 2096 queue->tx_skb = NULL; 2097 if (queue->tx_ring) { 2098 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; 2099 dma_free_coherent(&bp->pdev->dev, size, 2100 queue->tx_ring, queue->tx_ring_dma); 2101 queue->tx_ring = NULL; 2102 } 2103 if (queue->rx_ring) { 2104 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; 2105 dma_free_coherent(&bp->pdev->dev, size, 2106 queue->rx_ring, queue->rx_ring_dma); 2107 queue->rx_ring = NULL; 2108 } 2109 } 2110 } 2111 2112 static int gem_alloc_rx_buffers(struct macb *bp) 2113 { 2114 struct macb_queue *queue; 2115 unsigned int q; 2116 int size; 2117 2118 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2119 size = bp->rx_ring_size * sizeof(struct sk_buff *); 2120 queue->rx_skbuff 
= kzalloc(size, GFP_KERNEL); 2121 if (!queue->rx_skbuff) 2122 return -ENOMEM; 2123 else 2124 netdev_dbg(bp->dev, 2125 "Allocated %d RX struct sk_buff entries at %p\n", 2126 bp->rx_ring_size, queue->rx_skbuff); 2127 } 2128 return 0; 2129 } 2130 2131 static int macb_alloc_rx_buffers(struct macb *bp) 2132 { 2133 struct macb_queue *queue = &bp->queues[0]; 2134 int size; 2135 2136 size = bp->rx_ring_size * bp->rx_buffer_size; 2137 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, 2138 &queue->rx_buffers_dma, GFP_KERNEL); 2139 if (!queue->rx_buffers) 2140 return -ENOMEM; 2141 2142 netdev_dbg(bp->dev, 2143 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", 2144 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers); 2145 return 0; 2146 } 2147 2148 static int macb_alloc_consistent(struct macb *bp) 2149 { 2150 struct macb_queue *queue; 2151 unsigned int q; 2152 int size; 2153 2154 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2155 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; 2156 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 2157 &queue->tx_ring_dma, 2158 GFP_KERNEL); 2159 if (!queue->tx_ring) 2160 goto out_err; 2161 netdev_dbg(bp->dev, 2162 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n", 2163 q, size, (unsigned long)queue->tx_ring_dma, 2164 queue->tx_ring); 2165 2166 size = bp->tx_ring_size * sizeof(struct macb_tx_skb); 2167 queue->tx_skb = kmalloc(size, GFP_KERNEL); 2168 if (!queue->tx_skb) 2169 goto out_err; 2170 2171 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; 2172 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 2173 &queue->rx_ring_dma, GFP_KERNEL); 2174 if (!queue->rx_ring) 2175 goto out_err; 2176 netdev_dbg(bp->dev, 2177 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", 2178 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring); 2179 } 2180 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) 2181 goto out_err; 2182 2183 return 0; 2184 2185 out_err: 2186 macb_free_consistent(bp); 2187 return -ENOMEM; 2188 } 2189 2190 static void gem_init_rings(struct macb *bp) 2191 { 2192 struct macb_queue *queue; 2193 struct macb_dma_desc *desc = NULL; 2194 unsigned int q; 2195 int i; 2196 2197 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2198 for (i = 0; i < bp->tx_ring_size; i++) { 2199 desc = macb_tx_desc(queue, i); 2200 macb_set_addr(bp, desc, 0); 2201 desc->ctrl = MACB_BIT(TX_USED); 2202 } 2203 desc->ctrl |= MACB_BIT(TX_WRAP); 2204 queue->tx_head = 0; 2205 queue->tx_tail = 0; 2206 2207 queue->rx_tail = 0; 2208 queue->rx_prepared_head = 0; 2209 2210 gem_rx_refill(queue); 2211 } 2212 2213 } 2214 2215 static void macb_init_rings(struct macb *bp) 2216 { 2217 int i; 2218 struct macb_dma_desc *desc = NULL; 2219 2220 macb_init_rx_ring(&bp->queues[0]); 2221 2222 for (i = 0; i < bp->tx_ring_size; i++) { 2223 desc = macb_tx_desc(&bp->queues[0], i); 2224 macb_set_addr(bp, desc, 0); 2225 desc->ctrl = MACB_BIT(TX_USED); 2226 } 2227 bp->queues[0].tx_head = 0; 2228 bp->queues[0].tx_tail = 0; 2229 desc->ctrl |= MACB_BIT(TX_WRAP); 2230 } 2231 2232 static void macb_reset_hw(struct macb *bp) 2233 { 2234 struct macb_queue *queue; 2235 unsigned int q; 2236 u32 ctrl = macb_readl(bp, NCR); 2237 2238 /* Disable RX and TX (XXX: Should we halt the transmission 2239 * more gracefully?) 2240 */ 2241 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); 2242 2243 /* Clear the stats registers (XXX: Update stats first?) 
*/ 2244 ctrl |= MACB_BIT(CLRSTAT); 2245 2246 macb_writel(bp, NCR, ctrl); 2247 2248 /* Clear all status flags */ 2249 macb_writel(bp, TSR, -1); 2250 macb_writel(bp, RSR, -1); 2251 2252 /* Disable all interrupts */ 2253 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2254 queue_writel(queue, IDR, -1); 2255 queue_readl(queue, ISR); 2256 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 2257 queue_writel(queue, ISR, -1); 2258 } 2259 } 2260 2261 static u32 gem_mdc_clk_div(struct macb *bp) 2262 { 2263 u32 config; 2264 unsigned long pclk_hz = clk_get_rate(bp->pclk); 2265 2266 if (pclk_hz <= 20000000) 2267 config = GEM_BF(CLK, GEM_CLK_DIV8); 2268 else if (pclk_hz <= 40000000) 2269 config = GEM_BF(CLK, GEM_CLK_DIV16); 2270 else if (pclk_hz <= 80000000) 2271 config = GEM_BF(CLK, GEM_CLK_DIV32); 2272 else if (pclk_hz <= 120000000) 2273 config = GEM_BF(CLK, GEM_CLK_DIV48); 2274 else if (pclk_hz <= 160000000) 2275 config = GEM_BF(CLK, GEM_CLK_DIV64); 2276 else 2277 config = GEM_BF(CLK, GEM_CLK_DIV96); 2278 2279 return config; 2280 } 2281 2282 static u32 macb_mdc_clk_div(struct macb *bp) 2283 { 2284 u32 config; 2285 unsigned long pclk_hz; 2286 2287 if (macb_is_gem(bp)) 2288 return gem_mdc_clk_div(bp); 2289 2290 pclk_hz = clk_get_rate(bp->pclk); 2291 if (pclk_hz <= 20000000) 2292 config = MACB_BF(CLK, MACB_CLK_DIV8); 2293 else if (pclk_hz <= 40000000) 2294 config = MACB_BF(CLK, MACB_CLK_DIV16); 2295 else if (pclk_hz <= 80000000) 2296 config = MACB_BF(CLK, MACB_CLK_DIV32); 2297 else 2298 config = MACB_BF(CLK, MACB_CLK_DIV64); 2299 2300 return config; 2301 } 2302 2303 /* Get the DMA bus width field of the network configuration register that we 2304 * should program. We find the width from decoding the design configuration 2305 * register to find the maximum supported data bus width. 2306 */ 2307 static u32 macb_dbw(struct macb *bp) 2308 { 2309 if (!macb_is_gem(bp)) 2310 return 0; 2311 2312 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { 2313 case 4: 2314 return GEM_BF(DBW, GEM_DBW128); 2315 case 2: 2316 return GEM_BF(DBW, GEM_DBW64); 2317 case 1: 2318 default: 2319 return GEM_BF(DBW, GEM_DBW32); 2320 } 2321 } 2322 2323 /* Configure the receive DMA engine 2324 * - use the correct receive buffer size 2325 * - set best burst length for DMA operations 2326 * (if not supported by FIFO, it will fallback to default) 2327 * - set both rx/tx packet buffers to full memory size 2328 * These are configurable parameters for GEM. 
2329 */ 2330 static void macb_configure_dma(struct macb *bp) 2331 { 2332 struct macb_queue *queue; 2333 u32 buffer_size; 2334 unsigned int q; 2335 u32 dmacfg; 2336 2337 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; 2338 if (macb_is_gem(bp)) { 2339 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); 2340 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2341 if (q) 2342 queue_writel(queue, RBQS, buffer_size); 2343 else 2344 dmacfg |= GEM_BF(RXBS, buffer_size); 2345 } 2346 if (bp->dma_burst_length) 2347 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); 2348 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 2349 dmacfg &= ~GEM_BIT(ENDIA_PKT); 2350 2351 if (bp->native_io) 2352 dmacfg &= ~GEM_BIT(ENDIA_DESC); 2353 else 2354 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ 2355 2356 if (bp->dev->features & NETIF_F_HW_CSUM) 2357 dmacfg |= GEM_BIT(TXCOEN); 2358 else 2359 dmacfg &= ~GEM_BIT(TXCOEN); 2360 2361 dmacfg &= ~GEM_BIT(ADDR64); 2362 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2363 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2364 dmacfg |= GEM_BIT(ADDR64); 2365 #endif 2366 #ifdef CONFIG_MACB_USE_HWSTAMP 2367 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) 2368 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT); 2369 #endif 2370 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", 2371 dmacfg); 2372 gem_writel(bp, DMACFG, dmacfg); 2373 } 2374 } 2375 2376 static void macb_init_hw(struct macb *bp) 2377 { 2378 u32 config; 2379 2380 macb_reset_hw(bp); 2381 macb_set_hwaddr(bp); 2382 2383 config = macb_mdc_clk_div(bp); 2384 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ 2385 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ 2386 if (bp->caps & MACB_CAPS_JUMBO) 2387 config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ 2388 else 2389 config |= MACB_BIT(BIG); /* Receive oversized frames */ 2390 if (bp->dev->flags & IFF_PROMISC) 2391 config |= MACB_BIT(CAF); /* Copy All Frames */ 2392 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) 2393 config |= GEM_BIT(RXCOEN); 2394 if (!(bp->dev->flags & IFF_BROADCAST)) 2395 config |= MACB_BIT(NBC); /* No BroadCast */ 2396 config |= macb_dbw(bp); 2397 macb_writel(bp, NCFGR, config); 2398 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) 2399 gem_writel(bp, JML, bp->jumbo_max_len); 2400 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; 2401 if (bp->caps & MACB_CAPS_JUMBO) 2402 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; 2403 2404 macb_configure_dma(bp); 2405 } 2406 2407 /* The hash address register is 64 bits long and takes up two 2408 * locations in the memory map. The least significant bits are stored 2409 * in EMAC_HSL and the most significant bits in EMAC_HSH. 2410 * 2411 * The unicast hash enable and the multicast hash enable bits in the 2412 * network configuration register enable the reception of hash matched 2413 * frames. The destination address is reduced to a 6 bit index into 2414 * the 64 bit hash register using the following hash function. The 2415 * hash function is an exclusive or of every sixth bit of the 2416 * destination address. 
2417 * 2418 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] 2419 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] 2420 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] 2421 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] 2422 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] 2423 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] 2424 * 2425 * da[0] represents the least significant bit of the first byte 2426 * received, that is, the multicast/unicast indicator, and da[47] 2427 * represents the most significant bit of the last byte received. If 2428 * the hash index, hi[n], points to a bit that is set in the hash 2429 * register then the frame will be matched according to whether the 2430 * frame is multicast or unicast. A multicast match will be signalled 2431 * if the multicast hash enable bit is set, da[0] is 1 and the hash 2432 * index points to a bit set in the hash register. A unicast match 2433 * will be signalled if the unicast hash enable bit is set, da[0] is 0 2434 * and the hash index points to a bit set in the hash register. To 2435 * receive all multicast frames, the hash register should be set with 2436 * all ones and the multicast hash enable bit should be set in the 2437 * network configuration register. 2438 */ 2439 2440 static inline int hash_bit_value(int bitnr, __u8 *addr) 2441 { 2442 if (addr[bitnr / 8] & (1 << (bitnr % 8))) 2443 return 1; 2444 return 0; 2445 } 2446 2447 /* Return the hash index value for the specified address. */ 2448 static int hash_get_index(__u8 *addr) 2449 { 2450 int i, j, bitval; 2451 int hash_index = 0; 2452 2453 for (j = 0; j < 6; j++) { 2454 for (i = 0, bitval = 0; i < 8; i++) 2455 bitval ^= hash_bit_value(i * 6 + j, addr); 2456 2457 hash_index |= (bitval << j); 2458 } 2459 2460 return hash_index; 2461 } 2462 2463 /* Add multicast addresses to the internal multicast-hash table. */ 2464 static void macb_sethashtable(struct net_device *dev) 2465 { 2466 struct netdev_hw_addr *ha; 2467 unsigned long mc_filter[2]; 2468 unsigned int bitnr; 2469 struct macb *bp = netdev_priv(dev); 2470 2471 mc_filter[0] = 0; 2472 mc_filter[1] = 0; 2473 2474 netdev_for_each_mc_addr(ha, dev) { 2475 bitnr = hash_get_index(ha->addr); 2476 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 2477 } 2478 2479 macb_or_gem_writel(bp, HRB, mc_filter[0]); 2480 macb_or_gem_writel(bp, HRT, mc_filter[1]); 2481 } 2482 2483 /* Enable/Disable promiscuous and multicast modes. 
*/ 2484 static void macb_set_rx_mode(struct net_device *dev) 2485 { 2486 unsigned long cfg; 2487 struct macb *bp = netdev_priv(dev); 2488 2489 cfg = macb_readl(bp, NCFGR); 2490 2491 if (dev->flags & IFF_PROMISC) { 2492 /* Enable promiscuous mode */ 2493 cfg |= MACB_BIT(CAF); 2494 2495 /* Disable RX checksum offload */ 2496 if (macb_is_gem(bp)) 2497 cfg &= ~GEM_BIT(RXCOEN); 2498 } else { 2499 /* Disable promiscuous mode */ 2500 cfg &= ~MACB_BIT(CAF); 2501 2502 /* Enable RX checksum offload only if requested */ 2503 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) 2504 cfg |= GEM_BIT(RXCOEN); 2505 } 2506 2507 if (dev->flags & IFF_ALLMULTI) { 2508 /* Enable all multicast mode */ 2509 macb_or_gem_writel(bp, HRB, -1); 2510 macb_or_gem_writel(bp, HRT, -1); 2511 cfg |= MACB_BIT(NCFGR_MTI); 2512 } else if (!netdev_mc_empty(dev)) { 2513 /* Enable specific multicasts */ 2514 macb_sethashtable(dev); 2515 cfg |= MACB_BIT(NCFGR_MTI); 2516 } else if (dev->flags & (~IFF_ALLMULTI)) { 2517 /* Disable all multicast mode */ 2518 macb_or_gem_writel(bp, HRB, 0); 2519 macb_or_gem_writel(bp, HRT, 0); 2520 cfg &= ~MACB_BIT(NCFGR_MTI); 2521 } 2522 2523 macb_writel(bp, NCFGR, cfg); 2524 } 2525 2526 static int macb_open(struct net_device *dev) 2527 { 2528 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; 2529 struct macb *bp = netdev_priv(dev); 2530 struct macb_queue *queue; 2531 unsigned int q; 2532 int err; 2533 2534 netdev_dbg(bp->dev, "open\n"); 2535 2536 err = pm_runtime_get_sync(&bp->pdev->dev); 2537 if (err < 0) 2538 goto pm_exit; 2539 2540 /* RX buffers initialization */ 2541 macb_init_rx_buffer_size(bp, bufsz); 2542 2543 err = macb_alloc_consistent(bp); 2544 if (err) { 2545 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", 2546 err); 2547 goto pm_exit; 2548 } 2549 2550 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2551 napi_enable(&queue->napi); 2552 2553 macb_init_hw(bp); 2554 2555 err = macb_phylink_connect(bp); 2556 if (err) 2557 goto pm_exit; 2558 2559 netif_tx_start_all_queues(dev); 2560 2561 if (bp->ptp_info) 2562 bp->ptp_info->ptp_init(dev); 2563 2564 pm_exit: 2565 if (err) { 2566 pm_runtime_put_sync(&bp->pdev->dev); 2567 return err; 2568 } 2569 return 0; 2570 } 2571 2572 static int macb_close(struct net_device *dev) 2573 { 2574 struct macb *bp = netdev_priv(dev); 2575 struct macb_queue *queue; 2576 unsigned long flags; 2577 unsigned int q; 2578 2579 netif_tx_stop_all_queues(dev); 2580 2581 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2582 napi_disable(&queue->napi); 2583 2584 phylink_stop(bp->phylink); 2585 phylink_disconnect_phy(bp->phylink); 2586 2587 spin_lock_irqsave(&bp->lock, flags); 2588 macb_reset_hw(bp); 2589 netif_carrier_off(dev); 2590 spin_unlock_irqrestore(&bp->lock, flags); 2591 2592 macb_free_consistent(bp); 2593 2594 if (bp->ptp_info) 2595 bp->ptp_info->ptp_remove(dev); 2596 2597 pm_runtime_put(&bp->pdev->dev); 2598 2599 return 0; 2600 } 2601 2602 static int macb_change_mtu(struct net_device *dev, int new_mtu) 2603 { 2604 if (netif_running(dev)) 2605 return -EBUSY; 2606 2607 dev->mtu = new_mtu; 2608 2609 return 0; 2610 } 2611 2612 static void gem_update_stats(struct macb *bp) 2613 { 2614 struct macb_queue *queue; 2615 unsigned int i, q, idx; 2616 unsigned long *stat; 2617 2618 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 2619 2620 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { 2621 u32 offset = gem_statistics[i].offset; 2622 u64 val = bp->macb_reg_readl(bp, offset); 2623 2624 bp->ethtool_stats[i] += val; 2625 *p += 
val; 2626 2627 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { 2628 /* Add GEM_OCTTXH, GEM_OCTRXH */ 2629 val = bp->macb_reg_readl(bp, offset + 4); 2630 bp->ethtool_stats[i] += ((u64)val) << 32; 2631 *(++p) += val; 2632 } 2633 } 2634 2635 idx = GEM_STATS_LEN; 2636 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2637 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat) 2638 bp->ethtool_stats[idx++] = *stat; 2639 } 2640 2641 static struct net_device_stats *gem_get_stats(struct macb *bp) 2642 { 2643 struct gem_stats *hwstat = &bp->hw_stats.gem; 2644 struct net_device_stats *nstat = &bp->dev->stats; 2645 2646 gem_update_stats(bp); 2647 2648 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + 2649 hwstat->rx_alignment_errors + 2650 hwstat->rx_resource_errors + 2651 hwstat->rx_overruns + 2652 hwstat->rx_oversize_frames + 2653 hwstat->rx_jabbers + 2654 hwstat->rx_undersized_frames + 2655 hwstat->rx_length_field_frame_errors); 2656 nstat->tx_errors = (hwstat->tx_late_collisions + 2657 hwstat->tx_excessive_collisions + 2658 hwstat->tx_underrun + 2659 hwstat->tx_carrier_sense_errors); 2660 nstat->multicast = hwstat->rx_multicast_frames; 2661 nstat->collisions = (hwstat->tx_single_collision_frames + 2662 hwstat->tx_multiple_collision_frames + 2663 hwstat->tx_excessive_collisions); 2664 nstat->rx_length_errors = (hwstat->rx_oversize_frames + 2665 hwstat->rx_jabbers + 2666 hwstat->rx_undersized_frames + 2667 hwstat->rx_length_field_frame_errors); 2668 nstat->rx_over_errors = hwstat->rx_resource_errors; 2669 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; 2670 nstat->rx_frame_errors = hwstat->rx_alignment_errors; 2671 nstat->rx_fifo_errors = hwstat->rx_overruns; 2672 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; 2673 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; 2674 nstat->tx_fifo_errors = hwstat->tx_underrun; 2675 2676 return nstat; 2677 } 2678 2679 static void gem_get_ethtool_stats(struct net_device *dev, 2680 struct ethtool_stats *stats, u64 *data) 2681 { 2682 struct macb *bp; 2683 2684 bp = netdev_priv(dev); 2685 gem_update_stats(bp); 2686 memcpy(data, &bp->ethtool_stats, sizeof(u64) 2687 * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES)); 2688 } 2689 2690 static int gem_get_sset_count(struct net_device *dev, int sset) 2691 { 2692 struct macb *bp = netdev_priv(dev); 2693 2694 switch (sset) { 2695 case ETH_SS_STATS: 2696 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; 2697 default: 2698 return -EOPNOTSUPP; 2699 } 2700 } 2701 2702 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) 2703 { 2704 char stat_string[ETH_GSTRING_LEN]; 2705 struct macb *bp = netdev_priv(dev); 2706 struct macb_queue *queue; 2707 unsigned int i; 2708 unsigned int q; 2709 2710 switch (sset) { 2711 case ETH_SS_STATS: 2712 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN) 2713 memcpy(p, gem_statistics[i].stat_string, 2714 ETH_GSTRING_LEN); 2715 2716 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2717 for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) { 2718 snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s", 2719 q, queue_statistics[i].stat_string); 2720 memcpy(p, stat_string, ETH_GSTRING_LEN); 2721 } 2722 } 2723 break; 2724 } 2725 } 2726 2727 static struct net_device_stats *macb_get_stats(struct net_device *dev) 2728 { 2729 struct macb *bp = netdev_priv(dev); 2730 struct net_device_stats *nstat = &bp->dev->stats; 2731 struct macb_stats *hwstat = 
&bp->hw_stats.macb; 2732 2733 if (macb_is_gem(bp)) 2734 return gem_get_stats(bp); 2735 2736 /* read stats from hardware */ 2737 macb_update_stats(bp); 2738 2739 /* Convert HW stats into netdevice stats */ 2740 nstat->rx_errors = (hwstat->rx_fcs_errors + 2741 hwstat->rx_align_errors + 2742 hwstat->rx_resource_errors + 2743 hwstat->rx_overruns + 2744 hwstat->rx_oversize_pkts + 2745 hwstat->rx_jabbers + 2746 hwstat->rx_undersize_pkts + 2747 hwstat->rx_length_mismatch); 2748 nstat->tx_errors = (hwstat->tx_late_cols + 2749 hwstat->tx_excessive_cols + 2750 hwstat->tx_underruns + 2751 hwstat->tx_carrier_errors + 2752 hwstat->sqe_test_errors); 2753 nstat->collisions = (hwstat->tx_single_cols + 2754 hwstat->tx_multiple_cols + 2755 hwstat->tx_excessive_cols); 2756 nstat->rx_length_errors = (hwstat->rx_oversize_pkts + 2757 hwstat->rx_jabbers + 2758 hwstat->rx_undersize_pkts + 2759 hwstat->rx_length_mismatch); 2760 nstat->rx_over_errors = hwstat->rx_resource_errors + 2761 hwstat->rx_overruns; 2762 nstat->rx_crc_errors = hwstat->rx_fcs_errors; 2763 nstat->rx_frame_errors = hwstat->rx_align_errors; 2764 nstat->rx_fifo_errors = hwstat->rx_overruns; 2765 /* XXX: What does "missed" mean? */ 2766 nstat->tx_aborted_errors = hwstat->tx_excessive_cols; 2767 nstat->tx_carrier_errors = hwstat->tx_carrier_errors; 2768 nstat->tx_fifo_errors = hwstat->tx_underruns; 2769 /* Don't know about heartbeat or window errors... */ 2770 2771 return nstat; 2772 } 2773 2774 static int macb_get_regs_len(struct net_device *netdev) 2775 { 2776 return MACB_GREGS_NBR * sizeof(u32); 2777 } 2778 2779 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, 2780 void *p) 2781 { 2782 struct macb *bp = netdev_priv(dev); 2783 unsigned int tail, head; 2784 u32 *regs_buff = p; 2785 2786 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) 2787 | MACB_GREGS_VERSION; 2788 2789 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); 2790 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); 2791 2792 regs_buff[0] = macb_readl(bp, NCR); 2793 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); 2794 regs_buff[2] = macb_readl(bp, NSR); 2795 regs_buff[3] = macb_readl(bp, TSR); 2796 regs_buff[4] = macb_readl(bp, RBQP); 2797 regs_buff[5] = macb_readl(bp, TBQP); 2798 regs_buff[6] = macb_readl(bp, RSR); 2799 regs_buff[7] = macb_readl(bp, IMR); 2800 2801 regs_buff[8] = tail; 2802 regs_buff[9] = head; 2803 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); 2804 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); 2805 2806 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) 2807 regs_buff[12] = macb_or_gem_readl(bp, USRIO); 2808 if (macb_is_gem(bp)) 2809 regs_buff[13] = gem_readl(bp, DMACFG); 2810 } 2811 2812 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2813 { 2814 struct macb *bp = netdev_priv(netdev); 2815 2816 wol->supported = 0; 2817 wol->wolopts = 0; 2818 2819 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) 2820 phylink_ethtool_get_wol(bp->phylink, wol); 2821 } 2822 2823 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2824 { 2825 struct macb *bp = netdev_priv(netdev); 2826 int ret; 2827 2828 ret = phylink_ethtool_set_wol(bp->phylink, wol); 2829 if (!ret) 2830 return 0; 2831 2832 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || 2833 (wol->wolopts & ~WAKE_MAGIC)) 2834 return -EOPNOTSUPP; 2835 2836 if (wol->wolopts & WAKE_MAGIC) 2837 bp->wol |= MACB_WOL_ENABLED; 2838 else 2839 bp->wol &= ~MACB_WOL_ENABLED; 2840 2841 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & 
MACB_WOL_ENABLED); 2842 2843 return 0; 2844 } 2845 2846 static int macb_get_link_ksettings(struct net_device *netdev, 2847 struct ethtool_link_ksettings *kset) 2848 { 2849 struct macb *bp = netdev_priv(netdev); 2850 2851 return phylink_ethtool_ksettings_get(bp->phylink, kset); 2852 } 2853 2854 static int macb_set_link_ksettings(struct net_device *netdev, 2855 const struct ethtool_link_ksettings *kset) 2856 { 2857 struct macb *bp = netdev_priv(netdev); 2858 2859 return phylink_ethtool_ksettings_set(bp->phylink, kset); 2860 } 2861 2862 static void macb_get_ringparam(struct net_device *netdev, 2863 struct ethtool_ringparam *ring) 2864 { 2865 struct macb *bp = netdev_priv(netdev); 2866 2867 ring->rx_max_pending = MAX_RX_RING_SIZE; 2868 ring->tx_max_pending = MAX_TX_RING_SIZE; 2869 2870 ring->rx_pending = bp->rx_ring_size; 2871 ring->tx_pending = bp->tx_ring_size; 2872 } 2873 2874 static int macb_set_ringparam(struct net_device *netdev, 2875 struct ethtool_ringparam *ring) 2876 { 2877 struct macb *bp = netdev_priv(netdev); 2878 u32 new_rx_size, new_tx_size; 2879 unsigned int reset = 0; 2880 2881 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 2882 return -EINVAL; 2883 2884 new_rx_size = clamp_t(u32, ring->rx_pending, 2885 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE); 2886 new_rx_size = roundup_pow_of_two(new_rx_size); 2887 2888 new_tx_size = clamp_t(u32, ring->tx_pending, 2889 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE); 2890 new_tx_size = roundup_pow_of_two(new_tx_size); 2891 2892 if ((new_tx_size == bp->tx_ring_size) && 2893 (new_rx_size == bp->rx_ring_size)) { 2894 /* nothing to do */ 2895 return 0; 2896 } 2897 2898 if (netif_running(bp->dev)) { 2899 reset = 1; 2900 macb_close(bp->dev); 2901 } 2902 2903 bp->rx_ring_size = new_rx_size; 2904 bp->tx_ring_size = new_tx_size; 2905 2906 if (reset) 2907 macb_open(bp->dev); 2908 2909 return 0; 2910 } 2911 2912 #ifdef CONFIG_MACB_USE_HWSTAMP 2913 static unsigned int gem_get_tsu_rate(struct macb *bp) 2914 { 2915 struct clk *tsu_clk; 2916 unsigned int tsu_rate; 2917 2918 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); 2919 if (!IS_ERR(tsu_clk)) 2920 tsu_rate = clk_get_rate(tsu_clk); 2921 /* try pclk instead */ 2922 else if (!IS_ERR(bp->pclk)) { 2923 tsu_clk = bp->pclk; 2924 tsu_rate = clk_get_rate(tsu_clk); 2925 } else 2926 return -ENOTSUPP; 2927 return tsu_rate; 2928 } 2929 2930 static s32 gem_get_ptp_max_adj(void) 2931 { 2932 return 64000000; 2933 } 2934 2935 static int gem_get_ts_info(struct net_device *dev, 2936 struct ethtool_ts_info *info) 2937 { 2938 struct macb *bp = netdev_priv(dev); 2939 2940 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { 2941 ethtool_op_get_ts_info(dev, info); 2942 return 0; 2943 } 2944 2945 info->so_timestamping = 2946 SOF_TIMESTAMPING_TX_SOFTWARE | 2947 SOF_TIMESTAMPING_RX_SOFTWARE | 2948 SOF_TIMESTAMPING_SOFTWARE | 2949 SOF_TIMESTAMPING_TX_HARDWARE | 2950 SOF_TIMESTAMPING_RX_HARDWARE | 2951 SOF_TIMESTAMPING_RAW_HARDWARE; 2952 info->tx_types = 2953 (1 << HWTSTAMP_TX_ONESTEP_SYNC) | 2954 (1 << HWTSTAMP_TX_OFF) | 2955 (1 << HWTSTAMP_TX_ON); 2956 info->rx_filters = 2957 (1 << HWTSTAMP_FILTER_NONE) | 2958 (1 << HWTSTAMP_FILTER_ALL); 2959 2960 info->phc_index = bp->ptp_clock ? 
ptp_clock_index(bp->ptp_clock) : -1; 2961 2962 return 0; 2963 } 2964 2965 static struct macb_ptp_info gem_ptp_info = { 2966 .ptp_init = gem_ptp_init, 2967 .ptp_remove = gem_ptp_remove, 2968 .get_ptp_max_adj = gem_get_ptp_max_adj, 2969 .get_tsu_rate = gem_get_tsu_rate, 2970 .get_ts_info = gem_get_ts_info, 2971 .get_hwtst = gem_get_hwtst, 2972 .set_hwtst = gem_set_hwtst, 2973 }; 2974 #endif 2975 2976 static int macb_get_ts_info(struct net_device *netdev, 2977 struct ethtool_ts_info *info) 2978 { 2979 struct macb *bp = netdev_priv(netdev); 2980 2981 if (bp->ptp_info) 2982 return bp->ptp_info->get_ts_info(netdev, info); 2983 2984 return ethtool_op_get_ts_info(netdev, info); 2985 } 2986 2987 static void gem_enable_flow_filters(struct macb *bp, bool enable) 2988 { 2989 struct net_device *netdev = bp->dev; 2990 struct ethtool_rx_fs_item *item; 2991 u32 t2_scr; 2992 int num_t2_scr; 2993 2994 if (!(netdev->features & NETIF_F_NTUPLE)) 2995 return; 2996 2997 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)); 2998 2999 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3000 struct ethtool_rx_flow_spec *fs = &item->fs; 3001 struct ethtool_tcpip4_spec *tp4sp_m; 3002 3003 if (fs->location >= num_t2_scr) 3004 continue; 3005 3006 t2_scr = gem_readl_n(bp, SCRT2, fs->location); 3007 3008 /* enable/disable screener regs for the flow entry */ 3009 t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr); 3010 3011 /* only enable fields with no masking */ 3012 tp4sp_m = &(fs->m_u.tcp_ip4_spec); 3013 3014 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF)) 3015 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr); 3016 else 3017 t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr); 3018 3019 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF)) 3020 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr); 3021 else 3022 t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr); 3023 3024 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF))) 3025 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr); 3026 else 3027 t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr); 3028 3029 gem_writel_n(bp, SCRT2, fs->location, t2_scr); 3030 } 3031 } 3032 3033 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) 3034 { 3035 struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m; 3036 uint16_t index = fs->location; 3037 u32 w0, w1, t2_scr; 3038 bool cmp_a = false; 3039 bool cmp_b = false; 3040 bool cmp_c = false; 3041 3042 tp4sp_v = &(fs->h_u.tcp_ip4_spec); 3043 tp4sp_m = &(fs->m_u.tcp_ip4_spec); 3044 3045 /* ignore field if any masking set */ 3046 if (tp4sp_m->ip4src == 0xFFFFFFFF) { 3047 /* 1st compare reg - IP source address */ 3048 w0 = 0; 3049 w1 = 0; 3050 w0 = tp4sp_v->ip4src; 3051 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 3052 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); 3053 w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1); 3054 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0); 3055 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1); 3056 cmp_a = true; 3057 } 3058 3059 /* ignore field if any masking set */ 3060 if (tp4sp_m->ip4dst == 0xFFFFFFFF) { 3061 /* 2nd compare reg - IP destination address */ 3062 w0 = 0; 3063 w1 = 0; 3064 w0 = tp4sp_v->ip4dst; 3065 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 3066 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); 3067 w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1); 3068 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0); 3069 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1); 3070 cmp_b = true; 3071 } 3072 3073 /* ignore both port fields if masking set in both */ 3074 if 
((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) { 3075 /* 3rd compare reg - source port, destination port */ 3076 w0 = 0; 3077 w1 = 0; 3078 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1); 3079 if (tp4sp_m->psrc == tp4sp_m->pdst) { 3080 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0); 3081 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); 3082 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 3083 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); 3084 } else { 3085 /* only one port definition */ 3086 w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */ 3087 w0 = GEM_BFINS(T2MASK, 0xFFFF, w0); 3088 if (tp4sp_m->psrc == 0xFFFF) { /* src port */ 3089 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0); 3090 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); 3091 } else { /* dst port */ 3092 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); 3093 w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1); 3094 } 3095 } 3096 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0); 3097 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1); 3098 cmp_c = true; 3099 } 3100 3101 t2_scr = 0; 3102 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr); 3103 t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr); 3104 if (cmp_a) 3105 t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr); 3106 if (cmp_b) 3107 t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr); 3108 if (cmp_c) 3109 t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr); 3110 gem_writel_n(bp, SCRT2, index, t2_scr); 3111 } 3112 3113 static int gem_add_flow_filter(struct net_device *netdev, 3114 struct ethtool_rxnfc *cmd) 3115 { 3116 struct macb *bp = netdev_priv(netdev); 3117 struct ethtool_rx_flow_spec *fs = &cmd->fs; 3118 struct ethtool_rx_fs_item *item, *newfs; 3119 unsigned long flags; 3120 int ret = -EINVAL; 3121 bool added = false; 3122 3123 newfs = kmalloc(sizeof(*newfs), GFP_KERNEL); 3124 if (newfs == NULL) 3125 return -ENOMEM; 3126 memcpy(&newfs->fs, fs, sizeof(newfs->fs)); 3127 3128 netdev_dbg(netdev, 3129 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", 3130 fs->flow_type, (int)fs->ring_cookie, fs->location, 3131 htonl(fs->h_u.tcp_ip4_spec.ip4src), 3132 htonl(fs->h_u.tcp_ip4_spec.ip4dst), 3133 htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst)); 3134 3135 spin_lock_irqsave(&bp->rx_fs_lock, flags); 3136 3137 /* find correct place to add in list */ 3138 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3139 if (item->fs.location > newfs->fs.location) { 3140 list_add_tail(&newfs->list, &item->list); 3141 added = true; 3142 break; 3143 } else if (item->fs.location == fs->location) { 3144 netdev_err(netdev, "Rule not added: location %d not free!\n", 3145 fs->location); 3146 ret = -EBUSY; 3147 goto err; 3148 } 3149 } 3150 if (!added) 3151 list_add_tail(&newfs->list, &bp->rx_fs_list.list); 3152 3153 gem_prog_cmp_regs(bp, fs); 3154 bp->rx_fs_list.count++; 3155 /* enable filtering if NTUPLE on */ 3156 gem_enable_flow_filters(bp, 1); 3157 3158 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3159 return 0; 3160 3161 err: 3162 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3163 kfree(newfs); 3164 return ret; 3165 } 3166 3167 static int gem_del_flow_filter(struct net_device *netdev, 3168 struct ethtool_rxnfc *cmd) 3169 { 3170 struct macb *bp = netdev_priv(netdev); 3171 struct ethtool_rx_fs_item *item; 3172 struct ethtool_rx_flow_spec *fs; 3173 unsigned long flags; 3174 3175 spin_lock_irqsave(&bp->rx_fs_lock, flags); 3176 3177 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3178 if 
(item->fs.location == cmd->fs.location) { 3179 /* disable screener regs for the flow entry */ 3180 fs = &(item->fs); 3181 netdev_dbg(netdev, 3182 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", 3183 fs->flow_type, (int)fs->ring_cookie, fs->location, 3184 htonl(fs->h_u.tcp_ip4_spec.ip4src), 3185 htonl(fs->h_u.tcp_ip4_spec.ip4dst), 3186 htons(fs->h_u.tcp_ip4_spec.psrc), 3187 htons(fs->h_u.tcp_ip4_spec.pdst)); 3188 3189 gem_writel_n(bp, SCRT2, fs->location, 0); 3190 3191 list_del(&item->list); 3192 bp->rx_fs_list.count--; 3193 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3194 kfree(item); 3195 return 0; 3196 } 3197 } 3198 3199 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3200 return -EINVAL; 3201 } 3202 3203 static int gem_get_flow_entry(struct net_device *netdev, 3204 struct ethtool_rxnfc *cmd) 3205 { 3206 struct macb *bp = netdev_priv(netdev); 3207 struct ethtool_rx_fs_item *item; 3208 3209 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3210 if (item->fs.location == cmd->fs.location) { 3211 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); 3212 return 0; 3213 } 3214 } 3215 return -EINVAL; 3216 } 3217 3218 static int gem_get_all_flow_entries(struct net_device *netdev, 3219 struct ethtool_rxnfc *cmd, u32 *rule_locs) 3220 { 3221 struct macb *bp = netdev_priv(netdev); 3222 struct ethtool_rx_fs_item *item; 3223 uint32_t cnt = 0; 3224 3225 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3226 if (cnt == cmd->rule_cnt) 3227 return -EMSGSIZE; 3228 rule_locs[cnt] = item->fs.location; 3229 cnt++; 3230 } 3231 cmd->data = bp->max_tuples; 3232 cmd->rule_cnt = cnt; 3233 3234 return 0; 3235 } 3236 3237 static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, 3238 u32 *rule_locs) 3239 { 3240 struct macb *bp = netdev_priv(netdev); 3241 int ret = 0; 3242 3243 switch (cmd->cmd) { 3244 case ETHTOOL_GRXRINGS: 3245 cmd->data = bp->num_queues; 3246 break; 3247 case ETHTOOL_GRXCLSRLCNT: 3248 cmd->rule_cnt = bp->rx_fs_list.count; 3249 break; 3250 case ETHTOOL_GRXCLSRULE: 3251 ret = gem_get_flow_entry(netdev, cmd); 3252 break; 3253 case ETHTOOL_GRXCLSRLALL: 3254 ret = gem_get_all_flow_entries(netdev, cmd, rule_locs); 3255 break; 3256 default: 3257 netdev_err(netdev, 3258 "Command parameter %d is not supported\n", cmd->cmd); 3259 ret = -EOPNOTSUPP; 3260 } 3261 3262 return ret; 3263 } 3264 3265 static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) 3266 { 3267 struct macb *bp = netdev_priv(netdev); 3268 int ret; 3269 3270 switch (cmd->cmd) { 3271 case ETHTOOL_SRXCLSRLINS: 3272 if ((cmd->fs.location >= bp->max_tuples) 3273 || (cmd->fs.ring_cookie >= bp->num_queues)) { 3274 ret = -EINVAL; 3275 break; 3276 } 3277 ret = gem_add_flow_filter(netdev, cmd); 3278 break; 3279 case ETHTOOL_SRXCLSRLDEL: 3280 ret = gem_del_flow_filter(netdev, cmd); 3281 break; 3282 default: 3283 netdev_err(netdev, 3284 "Command parameter %d is not supported\n", cmd->cmd); 3285 ret = -EOPNOTSUPP; 3286 } 3287 3288 return ret; 3289 } 3290 3291 static const struct ethtool_ops macb_ethtool_ops = { 3292 .get_regs_len = macb_get_regs_len, 3293 .get_regs = macb_get_regs, 3294 .get_link = ethtool_op_get_link, 3295 .get_ts_info = ethtool_op_get_ts_info, 3296 .get_wol = macb_get_wol, 3297 .set_wol = macb_set_wol, 3298 .get_link_ksettings = macb_get_link_ksettings, 3299 .set_link_ksettings = macb_set_link_ksettings, 3300 .get_ringparam = macb_get_ringparam, 3301 .set_ringparam = macb_set_ringparam, 3302 }; 3303 3304 static const struct ethtool_ops 
gem_ethtool_ops = { 3305 .get_regs_len = macb_get_regs_len, 3306 .get_regs = macb_get_regs, 3307 .get_link = ethtool_op_get_link, 3308 .get_ts_info = macb_get_ts_info, 3309 .get_ethtool_stats = gem_get_ethtool_stats, 3310 .get_strings = gem_get_ethtool_strings, 3311 .get_sset_count = gem_get_sset_count, 3312 .get_link_ksettings = macb_get_link_ksettings, 3313 .set_link_ksettings = macb_set_link_ksettings, 3314 .get_ringparam = macb_get_ringparam, 3315 .set_ringparam = macb_set_ringparam, 3316 .get_rxnfc = gem_get_rxnfc, 3317 .set_rxnfc = gem_set_rxnfc, 3318 }; 3319 3320 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 3321 { 3322 struct macb *bp = netdev_priv(dev); 3323 3324 if (!netif_running(dev)) 3325 return -EINVAL; 3326 3327 if (bp->ptp_info) { 3328 switch (cmd) { 3329 case SIOCSHWTSTAMP: 3330 return bp->ptp_info->set_hwtst(dev, rq, cmd); 3331 case SIOCGHWTSTAMP: 3332 return bp->ptp_info->get_hwtst(dev, rq); 3333 } 3334 } 3335 3336 return phylink_mii_ioctl(bp->phylink, rq, cmd); 3337 } 3338 3339 static inline void macb_set_txcsum_feature(struct macb *bp, 3340 netdev_features_t features) 3341 { 3342 u32 val; 3343 3344 if (!macb_is_gem(bp)) 3345 return; 3346 3347 val = gem_readl(bp, DMACFG); 3348 if (features & NETIF_F_HW_CSUM) 3349 val |= GEM_BIT(TXCOEN); 3350 else 3351 val &= ~GEM_BIT(TXCOEN); 3352 3353 gem_writel(bp, DMACFG, val); 3354 } 3355 3356 static inline void macb_set_rxcsum_feature(struct macb *bp, 3357 netdev_features_t features) 3358 { 3359 struct net_device *netdev = bp->dev; 3360 u32 val; 3361 3362 if (!macb_is_gem(bp)) 3363 return; 3364 3365 val = gem_readl(bp, NCFGR); 3366 if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC)) 3367 val |= GEM_BIT(RXCOEN); 3368 else 3369 val &= ~GEM_BIT(RXCOEN); 3370 3371 gem_writel(bp, NCFGR, val); 3372 } 3373 3374 static inline void macb_set_rxflow_feature(struct macb *bp, 3375 netdev_features_t features) 3376 { 3377 if (!macb_is_gem(bp)) 3378 return; 3379 3380 gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE)); 3381 } 3382 3383 static int macb_set_features(struct net_device *netdev, 3384 netdev_features_t features) 3385 { 3386 struct macb *bp = netdev_priv(netdev); 3387 netdev_features_t changed = features ^ netdev->features; 3388 3389 /* TX checksum offload */ 3390 if (changed & NETIF_F_HW_CSUM) 3391 macb_set_txcsum_feature(bp, features); 3392 3393 /* RX checksum offload */ 3394 if (changed & NETIF_F_RXCSUM) 3395 macb_set_rxcsum_feature(bp, features); 3396 3397 /* RX Flow Filters */ 3398 if (changed & NETIF_F_NTUPLE) 3399 macb_set_rxflow_feature(bp, features); 3400 3401 return 0; 3402 } 3403 3404 static void macb_restore_features(struct macb *bp) 3405 { 3406 struct net_device *netdev = bp->dev; 3407 netdev_features_t features = netdev->features; 3408 3409 /* TX checksum offload */ 3410 macb_set_txcsum_feature(bp, features); 3411 3412 /* RX checksum offload */ 3413 macb_set_rxcsum_feature(bp, features); 3414 3415 /* RX Flow Filters */ 3416 macb_set_rxflow_feature(bp, features); 3417 } 3418 3419 static const struct net_device_ops macb_netdev_ops = { 3420 .ndo_open = macb_open, 3421 .ndo_stop = macb_close, 3422 .ndo_start_xmit = macb_start_xmit, 3423 .ndo_set_rx_mode = macb_set_rx_mode, 3424 .ndo_get_stats = macb_get_stats, 3425 .ndo_do_ioctl = macb_ioctl, 3426 .ndo_validate_addr = eth_validate_addr, 3427 .ndo_change_mtu = macb_change_mtu, 3428 .ndo_set_mac_address = eth_mac_addr, 3429 #ifdef CONFIG_NET_POLL_CONTROLLER 3430 .ndo_poll_controller = macb_poll_controller, 3431 #endif 3432 
.ndo_set_features = macb_set_features, 3433 .ndo_features_check = macb_features_check, 3434 }; 3435 3436 /* Configure peripheral capabilities according to device tree 3437 * and integration options used 3438 */ 3439 static void macb_configure_caps(struct macb *bp, 3440 const struct macb_config *dt_conf) 3441 { 3442 u32 dcfg; 3443 3444 if (dt_conf) 3445 bp->caps = dt_conf->caps; 3446 3447 if (hw_is_gem(bp->regs, bp->native_io)) { 3448 bp->caps |= MACB_CAPS_MACB_IS_GEM; 3449 3450 dcfg = gem_readl(bp, DCFG1); 3451 if (GEM_BFEXT(IRQCOR, dcfg) == 0) 3452 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; 3453 dcfg = gem_readl(bp, DCFG2); 3454 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0) 3455 bp->caps |= MACB_CAPS_FIFO_MODE; 3456 #ifdef CONFIG_MACB_USE_HWSTAMP 3457 if (gem_has_ptp(bp)) { 3458 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) 3459 dev_err(&bp->pdev->dev, 3460 "GEM doesn't support hardware ptp.\n"); 3461 else { 3462 bp->hw_dma_cap |= HW_DMA_CAP_PTP; 3463 bp->ptp_info = &gem_ptp_info; 3464 } 3465 } 3466 #endif 3467 } 3468 3469 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); 3470 } 3471 3472 static void macb_probe_queues(void __iomem *mem, 3473 bool native_io, 3474 unsigned int *queue_mask, 3475 unsigned int *num_queues) 3476 { 3477 unsigned int hw_q; 3478 3479 *queue_mask = 0x1; 3480 *num_queues = 1; 3481 3482 /* is it macb or gem ? 3483 * 3484 * We need to read directly from the hardware here because 3485 * we are early in the probe process and don't have the 3486 * MACB_CAPS_MACB_IS_GEM flag positioned 3487 */ 3488 if (!hw_is_gem(mem, native_io)) 3489 return; 3490 3491 /* bit 0 is never set but queue 0 always exists */ 3492 *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff; 3493 3494 *queue_mask |= 0x1; 3495 3496 for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) 3497 if (*queue_mask & (1 << hw_q)) 3498 (*num_queues)++; 3499 } 3500 3501 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, 3502 struct clk **hclk, struct clk **tx_clk, 3503 struct clk **rx_clk, struct clk **tsu_clk) 3504 { 3505 struct macb_platform_data *pdata; 3506 int err; 3507 3508 pdata = dev_get_platdata(&pdev->dev); 3509 if (pdata) { 3510 *pclk = pdata->pclk; 3511 *hclk = pdata->hclk; 3512 } else { 3513 *pclk = devm_clk_get(&pdev->dev, "pclk"); 3514 *hclk = devm_clk_get(&pdev->dev, "hclk"); 3515 } 3516 3517 if (IS_ERR_OR_NULL(*pclk)) { 3518 err = PTR_ERR(*pclk); 3519 if (!err) 3520 err = -ENODEV; 3521 3522 dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err); 3523 return err; 3524 } 3525 3526 if (IS_ERR_OR_NULL(*hclk)) { 3527 err = PTR_ERR(*hclk); 3528 if (!err) 3529 err = -ENODEV; 3530 3531 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err); 3532 return err; 3533 } 3534 3535 *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk"); 3536 if (IS_ERR(*tx_clk)) 3537 return PTR_ERR(*tx_clk); 3538 3539 *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk"); 3540 if (IS_ERR(*rx_clk)) 3541 return PTR_ERR(*rx_clk); 3542 3543 *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk"); 3544 if (IS_ERR(*tsu_clk)) 3545 return PTR_ERR(*tsu_clk); 3546 3547 err = clk_prepare_enable(*pclk); 3548 if (err) { 3549 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); 3550 return err; 3551 } 3552 3553 err = clk_prepare_enable(*hclk); 3554 if (err) { 3555 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err); 3556 goto err_disable_pclk; 3557 } 3558 3559 err = clk_prepare_enable(*tx_clk); 3560 if (err) { 3561 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); 3562 goto 
err_disable_hclk; 3563 } 3564 3565 err = clk_prepare_enable(*rx_clk); 3566 if (err) { 3567 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); 3568 goto err_disable_txclk; 3569 } 3570 3571 err = clk_prepare_enable(*tsu_clk); 3572 if (err) { 3573 dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err); 3574 goto err_disable_rxclk; 3575 } 3576 3577 return 0; 3578 3579 err_disable_rxclk: 3580 clk_disable_unprepare(*rx_clk); 3581 3582 err_disable_txclk: 3583 clk_disable_unprepare(*tx_clk); 3584 3585 err_disable_hclk: 3586 clk_disable_unprepare(*hclk); 3587 3588 err_disable_pclk: 3589 clk_disable_unprepare(*pclk); 3590 3591 return err; 3592 } 3593 3594 static int macb_init(struct platform_device *pdev) 3595 { 3596 struct net_device *dev = platform_get_drvdata(pdev); 3597 unsigned int hw_q, q; 3598 struct macb *bp = netdev_priv(dev); 3599 struct macb_queue *queue; 3600 int err; 3601 u32 val, reg; 3602 3603 bp->tx_ring_size = DEFAULT_TX_RING_SIZE; 3604 bp->rx_ring_size = DEFAULT_RX_RING_SIZE; 3605 3606 /* set the queue register mapping once for all: queue0 has a special 3607 * register mapping but we don't want to test the queue index then 3608 * compute the corresponding register offset at run time. 3609 */ 3610 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { 3611 if (!(bp->queue_mask & (1 << hw_q))) 3612 continue; 3613 3614 queue = &bp->queues[q]; 3615 queue->bp = bp; 3616 netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT); 3617 if (hw_q) { 3618 queue->ISR = GEM_ISR(hw_q - 1); 3619 queue->IER = GEM_IER(hw_q - 1); 3620 queue->IDR = GEM_IDR(hw_q - 1); 3621 queue->IMR = GEM_IMR(hw_q - 1); 3622 queue->TBQP = GEM_TBQP(hw_q - 1); 3623 queue->RBQP = GEM_RBQP(hw_q - 1); 3624 queue->RBQS = GEM_RBQS(hw_q - 1); 3625 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3626 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 3627 queue->TBQPH = GEM_TBQPH(hw_q - 1); 3628 queue->RBQPH = GEM_RBQPH(hw_q - 1); 3629 } 3630 #endif 3631 } else { 3632 /* queue0 uses legacy registers */ 3633 queue->ISR = MACB_ISR; 3634 queue->IER = MACB_IER; 3635 queue->IDR = MACB_IDR; 3636 queue->IMR = MACB_IMR; 3637 queue->TBQP = MACB_TBQP; 3638 queue->RBQP = MACB_RBQP; 3639 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3640 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 3641 queue->TBQPH = MACB_TBQPH; 3642 queue->RBQPH = MACB_RBQPH; 3643 } 3644 #endif 3645 } 3646 3647 /* get irq: here we use the linux queue index, not the hardware 3648 * queue index. the queue irq definitions in the device tree 3649 * must remove the optional gaps that could exist in the 3650 * hardware queue mask. 
3651 */ 3652 queue->irq = platform_get_irq(pdev, q); 3653 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, 3654 IRQF_SHARED, dev->name, queue); 3655 if (err) { 3656 dev_err(&pdev->dev, 3657 "Unable to request IRQ %d (error %d)\n", 3658 queue->irq, err); 3659 return err; 3660 } 3661 3662 INIT_WORK(&queue->tx_error_task, macb_tx_error_task); 3663 q++; 3664 } 3665 3666 dev->netdev_ops = &macb_netdev_ops; 3667 3668 /* setup appropriated routines according to adapter type */ 3669 if (macb_is_gem(bp)) { 3670 bp->max_tx_length = GEM_MAX_TX_LEN; 3671 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; 3672 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; 3673 bp->macbgem_ops.mog_init_rings = gem_init_rings; 3674 bp->macbgem_ops.mog_rx = gem_rx; 3675 dev->ethtool_ops = &gem_ethtool_ops; 3676 } else { 3677 bp->max_tx_length = MACB_MAX_TX_LEN; 3678 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; 3679 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; 3680 bp->macbgem_ops.mog_init_rings = macb_init_rings; 3681 bp->macbgem_ops.mog_rx = macb_rx; 3682 dev->ethtool_ops = &macb_ethtool_ops; 3683 } 3684 3685 /* Set features */ 3686 dev->hw_features = NETIF_F_SG; 3687 3688 /* Check LSO capability */ 3689 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) 3690 dev->hw_features |= MACB_NETIF_LSO; 3691 3692 /* Checksum offload is only available on gem with packet buffer */ 3693 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) 3694 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 3695 if (bp->caps & MACB_CAPS_SG_DISABLED) 3696 dev->hw_features &= ~NETIF_F_SG; 3697 dev->features = dev->hw_features; 3698 3699 /* Check RX Flow Filters support. 3700 * Max Rx flows set by availability of screeners & compare regs: 3701 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs 3702 */ 3703 reg = gem_readl(bp, DCFG8); 3704 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), 3705 GEM_BFEXT(T2SCR, reg)); 3706 if (bp->max_tuples > 0) { 3707 /* also needs one ethtype match to check IPv4 */ 3708 if (GEM_BFEXT(SCR2ETH, reg) > 0) { 3709 /* program this reg now */ 3710 reg = 0; 3711 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg); 3712 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg); 3713 /* Filtering is supported in hw but don't enable it in kernel now */ 3714 dev->hw_features |= NETIF_F_NTUPLE; 3715 /* init Rx flow definitions */ 3716 INIT_LIST_HEAD(&bp->rx_fs_list.list); 3717 bp->rx_fs_list.count = 0; 3718 spin_lock_init(&bp->rx_fs_lock); 3719 } else 3720 bp->max_tuples = 0; 3721 } 3722 3723 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { 3724 val = 0; 3725 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) 3726 val = GEM_BIT(RGMII); 3727 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && 3728 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 3729 val = MACB_BIT(RMII); 3730 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 3731 val = MACB_BIT(MII); 3732 3733 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) 3734 val |= MACB_BIT(CLKEN); 3735 3736 macb_or_gem_writel(bp, USRIO, val); 3737 } 3738 3739 /* Set MII management clock divider */ 3740 val = macb_mdc_clk_div(bp); 3741 val |= macb_dbw(bp); 3742 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) 3743 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 3744 macb_writel(bp, NCFGR, val); 3745 3746 return 0; 3747 } 3748 3749 #if defined(CONFIG_OF) 3750 /* 1518 rounded up */ 3751 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3752 /* max number of receive buffers */ 3753 #define AT91ETHER_MAX_RX_DESCR 9 3754 3755 static 
struct sifive_fu540_macb_mgmt *mgmt; 3756 3757 /* Initialize and start the Receiver and Transmit subsystems */ 3758 static int at91ether_start(struct net_device *dev) 3759 { 3760 struct macb *lp = netdev_priv(dev); 3761 struct macb_queue *q = &lp->queues[0]; 3762 struct macb_dma_desc *desc; 3763 dma_addr_t addr; 3764 u32 ctl; 3765 int i; 3766 3767 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, 3768 (AT91ETHER_MAX_RX_DESCR * 3769 macb_dma_desc_get_size(lp)), 3770 &q->rx_ring_dma, GFP_KERNEL); 3771 if (!q->rx_ring) 3772 return -ENOMEM; 3773 3774 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, 3775 AT91ETHER_MAX_RX_DESCR * 3776 AT91ETHER_MAX_RBUFF_SZ, 3777 &q->rx_buffers_dma, GFP_KERNEL); 3778 if (!q->rx_buffers) { 3779 dma_free_coherent(&lp->pdev->dev, 3780 AT91ETHER_MAX_RX_DESCR * 3781 macb_dma_desc_get_size(lp), 3782 q->rx_ring, q->rx_ring_dma); 3783 q->rx_ring = NULL; 3784 return -ENOMEM; 3785 } 3786 3787 addr = q->rx_buffers_dma; 3788 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { 3789 desc = macb_rx_desc(q, i); 3790 macb_set_addr(lp, desc, addr); 3791 desc->ctrl = 0; 3792 addr += AT91ETHER_MAX_RBUFF_SZ; 3793 } 3794 3795 /* Set the Wrap bit on the last descriptor */ 3796 desc->addr |= MACB_BIT(RX_WRAP); 3797 3798 /* Reset buffer index */ 3799 q->rx_tail = 0; 3800 3801 /* Program address of descriptor list in Rx Buffer Queue register */ 3802 macb_writel(lp, RBQP, q->rx_ring_dma); 3803 3804 /* Enable Receive and Transmit */ 3805 ctl = macb_readl(lp, NCR); 3806 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); 3807 3808 return 0; 3809 } 3810 3811 /* Open the ethernet interface */ 3812 static int at91ether_open(struct net_device *dev) 3813 { 3814 struct macb *lp = netdev_priv(dev); 3815 u32 ctl; 3816 int ret; 3817 3818 ret = pm_runtime_get_sync(&lp->pdev->dev); 3819 if (ret < 0) 3820 return ret; 3821 3822 /* Clear internal statistics */ 3823 ctl = macb_readl(lp, NCR); 3824 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); 3825 3826 macb_set_hwaddr(lp); 3827 3828 ret = at91ether_start(dev); 3829 if (ret) 3830 return ret; 3831 3832 /* Enable MAC interrupts */ 3833 macb_writel(lp, IER, MACB_BIT(RCOMP) | 3834 MACB_BIT(RXUBR) | 3835 MACB_BIT(ISR_TUND) | 3836 MACB_BIT(ISR_RLE) | 3837 MACB_BIT(TCOMP) | 3838 MACB_BIT(ISR_ROVR) | 3839 MACB_BIT(HRESP)); 3840 3841 ret = macb_phylink_connect(lp); 3842 if (ret) 3843 return ret; 3844 3845 netif_start_queue(dev); 3846 3847 return 0; 3848 } 3849 3850 /* Close the interface */ 3851 static int at91ether_close(struct net_device *dev) 3852 { 3853 struct macb *lp = netdev_priv(dev); 3854 struct macb_queue *q = &lp->queues[0]; 3855 u32 ctl; 3856 3857 /* Disable Receiver and Transmitter */ 3858 ctl = macb_readl(lp, NCR); 3859 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); 3860 3861 /* Disable MAC interrupts */ 3862 macb_writel(lp, IDR, MACB_BIT(RCOMP) | 3863 MACB_BIT(RXUBR) | 3864 MACB_BIT(ISR_TUND) | 3865 MACB_BIT(ISR_RLE) | 3866 MACB_BIT(TCOMP) | 3867 MACB_BIT(ISR_ROVR) | 3868 MACB_BIT(HRESP)); 3869 3870 netif_stop_queue(dev); 3871 3872 phylink_stop(lp->phylink); 3873 phylink_disconnect_phy(lp->phylink); 3874 3875 dma_free_coherent(&lp->pdev->dev, 3876 AT91ETHER_MAX_RX_DESCR * 3877 macb_dma_desc_get_size(lp), 3878 q->rx_ring, q->rx_ring_dma); 3879 q->rx_ring = NULL; 3880 3881 dma_free_coherent(&lp->pdev->dev, 3882 AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ, 3883 q->rx_buffers, q->rx_buffers_dma); 3884 q->rx_buffers = NULL; 3885 3886 return pm_runtime_put(&lp->pdev->dev); 3887 } 3888 3889 /* Transmit packet */ 3890 static netdev_tx_t 
at91ether_start_xmit(struct sk_buff *skb, 3891 struct net_device *dev) 3892 { 3893 struct macb *lp = netdev_priv(dev); 3894 3895 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { 3896 netif_stop_queue(dev); 3897 3898 /* Store packet information (to free when Tx completed) */ 3899 lp->skb = skb; 3900 lp->skb_length = skb->len; 3901 lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data, 3902 skb->len, DMA_TO_DEVICE); 3903 if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) { 3904 dev_kfree_skb_any(skb); 3905 dev->stats.tx_dropped++; 3906 netdev_err(dev, "%s: DMA mapping error\n", __func__); 3907 return NETDEV_TX_OK; 3908 } 3909 3910 /* Set address of the data in the Transmit Address register */ 3911 macb_writel(lp, TAR, lp->skb_physaddr); 3912 /* Set length of the packet in the Transmit Control register */ 3913 macb_writel(lp, TCR, skb->len); 3914 3915 } else { 3916 netdev_err(dev, "%s called, but device is busy!\n", __func__); 3917 return NETDEV_TX_BUSY; 3918 } 3919 3920 return NETDEV_TX_OK; 3921 } 3922 3923 /* Extract received frame from buffer descriptors and sent to upper layers. 3924 * (Called from interrupt context) 3925 */ 3926 static void at91ether_rx(struct net_device *dev) 3927 { 3928 struct macb *lp = netdev_priv(dev); 3929 struct macb_queue *q = &lp->queues[0]; 3930 struct macb_dma_desc *desc; 3931 unsigned char *p_recv; 3932 struct sk_buff *skb; 3933 unsigned int pktlen; 3934 3935 desc = macb_rx_desc(q, q->rx_tail); 3936 while (desc->addr & MACB_BIT(RX_USED)) { 3937 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ; 3938 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl); 3939 skb = netdev_alloc_skb(dev, pktlen + 2); 3940 if (skb) { 3941 skb_reserve(skb, 2); 3942 skb_put_data(skb, p_recv, pktlen); 3943 3944 skb->protocol = eth_type_trans(skb, dev); 3945 dev->stats.rx_packets++; 3946 dev->stats.rx_bytes += pktlen; 3947 netif_rx(skb); 3948 } else { 3949 dev->stats.rx_dropped++; 3950 } 3951 3952 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH)) 3953 dev->stats.multicast++; 3954 3955 /* reset ownership bit */ 3956 desc->addr &= ~MACB_BIT(RX_USED); 3957 3958 /* wrap after last buffer */ 3959 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) 3960 q->rx_tail = 0; 3961 else 3962 q->rx_tail++; 3963 3964 desc = macb_rx_desc(q, q->rx_tail); 3965 } 3966 } 3967 3968 /* MAC interrupt handler */ 3969 static irqreturn_t at91ether_interrupt(int irq, void *dev_id) 3970 { 3971 struct net_device *dev = dev_id; 3972 struct macb *lp = netdev_priv(dev); 3973 u32 intstatus, ctl; 3974 3975 /* MAC Interrupt Status register indicates what interrupts are pending. 3976 * It is automatically cleared once read. 
3977 */ 3978 intstatus = macb_readl(lp, ISR); 3979 3980 /* Receive complete */ 3981 if (intstatus & MACB_BIT(RCOMP)) 3982 at91ether_rx(dev); 3983 3984 /* Transmit complete */ 3985 if (intstatus & MACB_BIT(TCOMP)) { 3986 /* The TCOM bit is set even if the transmission failed */ 3987 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) 3988 dev->stats.tx_errors++; 3989 3990 if (lp->skb) { 3991 dev_consume_skb_irq(lp->skb); 3992 lp->skb = NULL; 3993 dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr, 3994 lp->skb_length, DMA_TO_DEVICE); 3995 dev->stats.tx_packets++; 3996 dev->stats.tx_bytes += lp->skb_length; 3997 } 3998 netif_wake_queue(dev); 3999 } 4000 4001 /* Work-around for EMAC Errata section 41.3.1 */ 4002 if (intstatus & MACB_BIT(RXUBR)) { 4003 ctl = macb_readl(lp, NCR); 4004 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); 4005 wmb(); 4006 macb_writel(lp, NCR, ctl | MACB_BIT(RE)); 4007 } 4008 4009 if (intstatus & MACB_BIT(ISR_ROVR)) 4010 netdev_err(dev, "ROVR error\n"); 4011 4012 return IRQ_HANDLED; 4013 } 4014 4015 #ifdef CONFIG_NET_POLL_CONTROLLER 4016 static void at91ether_poll_controller(struct net_device *dev) 4017 { 4018 unsigned long flags; 4019 4020 local_irq_save(flags); 4021 at91ether_interrupt(dev->irq, dev); 4022 local_irq_restore(flags); 4023 } 4024 #endif 4025 4026 static const struct net_device_ops at91ether_netdev_ops = { 4027 .ndo_open = at91ether_open, 4028 .ndo_stop = at91ether_close, 4029 .ndo_start_xmit = at91ether_start_xmit, 4030 .ndo_get_stats = macb_get_stats, 4031 .ndo_set_rx_mode = macb_set_rx_mode, 4032 .ndo_set_mac_address = eth_mac_addr, 4033 .ndo_do_ioctl = macb_ioctl, 4034 .ndo_validate_addr = eth_validate_addr, 4035 #ifdef CONFIG_NET_POLL_CONTROLLER 4036 .ndo_poll_controller = at91ether_poll_controller, 4037 #endif 4038 }; 4039 4040 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, 4041 struct clk **hclk, struct clk **tx_clk, 4042 struct clk **rx_clk, struct clk **tsu_clk) 4043 { 4044 int err; 4045 4046 *hclk = NULL; 4047 *tx_clk = NULL; 4048 *rx_clk = NULL; 4049 *tsu_clk = NULL; 4050 4051 *pclk = devm_clk_get(&pdev->dev, "ether_clk"); 4052 if (IS_ERR(*pclk)) 4053 return PTR_ERR(*pclk); 4054 4055 err = clk_prepare_enable(*pclk); 4056 if (err) { 4057 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); 4058 return err; 4059 } 4060 4061 return 0; 4062 } 4063 4064 static int at91ether_init(struct platform_device *pdev) 4065 { 4066 struct net_device *dev = platform_get_drvdata(pdev); 4067 struct macb *bp = netdev_priv(dev); 4068 int err; 4069 4070 bp->queues[0].bp = bp; 4071 4072 dev->netdev_ops = &at91ether_netdev_ops; 4073 dev->ethtool_ops = &macb_ethtool_ops; 4074 4075 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 4076 0, dev->name, dev); 4077 if (err) 4078 return err; 4079 4080 macb_writel(bp, NCR, 0); 4081 4082 macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG)); 4083 4084 return 0; 4085 } 4086 4087 static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw, 4088 unsigned long parent_rate) 4089 { 4090 return mgmt->rate; 4091 } 4092 4093 static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate, 4094 unsigned long *parent_rate) 4095 { 4096 if (WARN_ON(rate < 2500000)) 4097 return 2500000; 4098 else if (rate == 2500000) 4099 return 2500000; 4100 else if (WARN_ON(rate < 13750000)) 4101 return 2500000; 4102 else if (WARN_ON(rate < 25000000)) 4103 return 25000000; 4104 else if (rate == 25000000) 4105 return 25000000; 4106 else if (WARN_ON(rate < 75000000)) 4107 
static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	return mgmt->rate;
}

static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long *parent_rate)
{
	if (WARN_ON(rate < 2500000))
		return 2500000;
	else if (rate == 2500000)
		return 2500000;
	else if (WARN_ON(rate < 13750000))
		return 2500000;
	else if (WARN_ON(rate < 25000000))
		return 25000000;
	else if (rate == 25000000)
		return 25000000;
	else if (WARN_ON(rate < 75000000))
		return 25000000;
	else if (WARN_ON(rate < 125000000))
		return 125000000;
	else if (rate == 125000000)
		return 125000000;

	WARN_ON(rate > 125000000);

	return 125000000;
}

static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
	if (rate != 125000000)
		iowrite32(1, mgmt->reg);
	else
		iowrite32(0, mgmt->reg);
	mgmt->rate = rate;

	return 0;
}

static const struct clk_ops fu540_c000_ops = {
	.recalc_rate = fu540_macb_tx_recalc_rate,
	.round_rate = fu540_macb_tx_round_rate,
	.set_rate = fu540_macb_tx_set_rate,
};

static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
			       struct clk **hclk, struct clk **tx_clk,
			       struct clk **rx_clk, struct clk **tsu_clk)
{
	struct clk_init_data init;
	int err = 0;

	err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
	if (err)
		return err;

	mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
	if (!mgmt)
		return -ENOMEM;

	init.name = "sifive-gemgxl-mgmt";
	init.ops = &fu540_c000_ops;
	init.flags = 0;
	init.num_parents = 0;

	mgmt->rate = 0;
	mgmt->hw.init = &init;

	*tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
	if (IS_ERR(*tx_clk))
		return PTR_ERR(*tx_clk);

	err = clk_prepare_enable(*tx_clk);
	if (err)
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
	else
		dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);

	return 0;
}

static int fu540_c000_init(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	mgmt->reg = ioremap(res->start, resource_size(res));
	if (!mgmt->reg)
		return -ENOMEM;

	return macb_init(pdev);
}

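/* Per-SoC configuration: capability flags, DMA burst length, optional
 * jumbo frame limit, and the clock/IP init hooks run at probe time.
 * A config is selected through the OF match table below; devices with
 * no match fall back to default_gem_config.
 */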
static const struct macb_config fu540_c000_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = fu540_c000_clk_init,
	.init = fu540_c000_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3macb_config = {
	.caps = MACB_CAPS_SG_DISABLED
	      | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d2_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config sama5d4_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config emac_config = {
	.caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
};

static const struct macb_config np4_config = {
	.caps = MACB_CAPS_USRIO_DISABLED,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config zynqmp_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config zynq_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
		MACB_CAPS_NEEDS_RSTONUBR,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
	{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */

static const struct macb_config default_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

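/* Bind a MACB/GEM instance: map the register window, bring up clocks,
 * enable runtime PM, allocate the net_device, detect capabilities and
 * DMA addressing width, fetch the MAC address and PHY mode (DT first,
 * then hardware/MII defaults), run the IP-specific init hook, then
 * register the MDIO bus and the net device.
 */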
static int macb_probe(struct platform_device *pdev)
{
	const struct macb_config *macb_config = &default_gem_config;
	int (*clk_init)(struct platform_device *, struct clk **,
			struct clk **, struct clk **, struct clk **,
			struct clk **) = macb_config->clk_init;
	int (*init)(struct platform_device *) = macb_config->init;
	struct device_node *np = pdev->dev.of_node;
	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
	struct clk *tsu_clk = NULL;
	unsigned int queue_mask, num_queues;
	bool native_io;
	phy_interface_t interface;
	struct net_device *dev;
	struct resource *regs;
	void __iomem *mem;
	const char *mac;
	struct macb *bp;
	int err, val;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (np) {
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, np);
		if (match && match->data) {
			macb_config = match->data;
			clk_init = macb_config->clk_init;
			init = macb_config->init;
		}
	}

	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
	if (err)
		return err;

	pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	native_io = hw_is_native_io(mem);

	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	bp->rx_clk = rx_clk;
	bp->tsu_clk = tsu_clk;
	if (macb_config)
		bp->jumbo_max_len = macb_config->jumbo_max_len;

	bp->wol = 0;
	if (of_get_property(np, "magic-packet", NULL))
		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

	spin_lock_init(&bp->lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

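	/* Controllers reporting DAW64 in DCFG6 can address more than 32 bits
	 * of DMA space: use a 44-bit mask and extended buffer descriptors.
	 */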
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
		bp->hw_dma_cap |= HW_DMA_CAP_64B;
	}
#endif
	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	/* MTU range: 68 - 1500 or 10240 */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if (bp->caps & MACB_CAPS_JUMBO)
		dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = ETH_DATA_LEN;

	if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
		val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);

		val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);
	}

	bp->rx_intr_mask = MACB_RX_INT_FLAGS;
	if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
		bp->rx_intr_mask |= MACB_BIT(RXUBR);

	mac = of_get_mac_address(np);
	if (PTR_ERR(mac) == -EPROBE_DEFER) {
		err = -EPROBE_DEFER;
		goto err_out_free_netdev;
	} else if (!IS_ERR_OR_NULL(mac)) {
		ether_addr_copy(bp->dev->dev_addr, mac);
	} else {
		macb_get_hwaddr(bp);
	}

	err = of_get_phy_mode(np, &interface);
	if (err)
		/* not found in DT, MII by default */
		bp->phy_interface = PHY_INTERFACE_MODE_MII;
	else
		bp->phy_interface = interface;

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
		     (unsigned long)bp);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);

	return 0;

err_out_unregister_mdio:
	mdiobus_unregister(bp->mii_bus);
	mdiobus_free(bp->mii_bus);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);
	clk_disable_unprepare(tsu_clk);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	return err;
}

static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);

		unregister_netdev(dev);
		tasklet_kill(&bp->hresp_err_tasklet);
		pm_runtime_disable(&pdev->dev);
		pm_runtime_dont_use_autosuspend(&pdev->dev);
		if (!pm_runtime_suspended(&pdev->dev)) {
			clk_disable_unprepare(bp->tx_clk);
			clk_disable_unprepare(bp->hclk);
			clk_disable_unprepare(bp->pclk);
			clk_disable_unprepare(bp->rx_clk);
			clk_disable_unprepare(bp->tsu_clk);
			pm_runtime_set_suspended(&pdev->dev);
		}
		phylink_destroy(bp->phylink);
		free_netdev(dev);
	}

	return 0;
}

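/* System sleep: with WoL enabled, only the magic-packet interrupt is
 * left armed and the queue IRQ is flagged as a wakeup source; otherwise
 * NAPI, phylink and the MAC are stopped and the USRIO/SCRT2 state
 * needed by macb_resume() is saved.
 */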
static int __maybe_unused macb_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue = bp->queues;
	unsigned long flags;
	unsigned int q;

	if (!netif_running(netdev))
		return 0;

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
		netif_device_detach(netdev);
	} else {
		netif_device_detach(netdev);
		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue)
			napi_disable(&queue->napi);
		rtnl_lock();
		phylink_stop(bp->phylink);
		rtnl_unlock();
		spin_lock_irqsave(&bp->lock, flags);
		macb_reset_hw(bp);
		spin_unlock_irqrestore(&bp->lock, flags);

		if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
			bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);

		if (netdev->hw_features & NETIF_F_NTUPLE)
			bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
	}

	netif_carrier_off(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(netdev);
	pm_runtime_force_suspend(dev);

	return 0;
}

static int __maybe_unused macb_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue = bp->queues;
	unsigned int q;

	if (!netif_running(netdev))
		return 0;

	pm_runtime_force_resume(dev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		macb_writel(bp, NCR, MACB_BIT(MPE));

		if (netdev->hw_features & NETIF_F_NTUPLE)
			gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);

		if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
			macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);

		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue)
			napi_enable(&queue->napi);
		rtnl_lock();
		phylink_start(bp->phylink);
		rtnl_unlock();
	}

	macb_init_hw(bp);
	macb_set_rx_mode(netdev);
	macb_restore_features(bp);
	netif_device_attach(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_init(netdev);

	return 0;
}

static int __maybe_unused macb_runtime_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(&bp->dev->dev))) {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}
	clk_disable_unprepare(bp->tsu_clk);

	return 0;
}

static int __maybe_unused macb_runtime_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(&bp->dev->dev))) {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}
	clk_prepare_enable(bp->tsu_clk);

	return 0;
}

static const struct dev_pm_ops macb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
	SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
};

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm	= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");