1 /* This program is free software; you can redistribute it and/or modify 2 * it under the terms of the GNU General Public License as published by 3 * the Free Software Foundation; version 2 of the License 4 * 5 * This program is distributed in the hope that it will be useful, 6 * but WITHOUT ANY WARRANTY; without even the implied warranty of 7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 8 * GNU General Public License for more details. 9 * 10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> 11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> 12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> 13 */ 14 15 #include <linux/of_device.h> 16 #include <linux/of_mdio.h> 17 #include <linux/of_net.h> 18 #include <linux/mfd/syscon.h> 19 #include <linux/regmap.h> 20 #include <linux/clk.h> 21 #include <linux/pm_runtime.h> 22 #include <linux/if_vlan.h> 23 #include <linux/reset.h> 24 #include <linux/tcp.h> 25 #include <linux/interrupt.h> 26 #include <linux/pinctrl/devinfo.h> 27 28 #include "mtk_eth_soc.h" 29 30 static int mtk_msg_level = -1; 31 module_param_named(msg_level, mtk_msg_level, int, 0); 32 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)"); 33 34 #define MTK_ETHTOOL_STAT(x) { #x, \ 35 offsetof(struct mtk_hw_stats, x) / sizeof(u64) } 36 37 /* strings used by ethtool */ 38 static const struct mtk_ethtool_stats { 39 char str[ETH_GSTRING_LEN]; 40 u32 offset; 41 } mtk_ethtool_stats[] = { 42 MTK_ETHTOOL_STAT(tx_bytes), 43 MTK_ETHTOOL_STAT(tx_packets), 44 MTK_ETHTOOL_STAT(tx_skip), 45 MTK_ETHTOOL_STAT(tx_collisions), 46 MTK_ETHTOOL_STAT(rx_bytes), 47 MTK_ETHTOOL_STAT(rx_packets), 48 MTK_ETHTOOL_STAT(rx_overflow), 49 MTK_ETHTOOL_STAT(rx_fcs_errors), 50 MTK_ETHTOOL_STAT(rx_short_errors), 51 MTK_ETHTOOL_STAT(rx_long_errors), 52 MTK_ETHTOOL_STAT(rx_checksum_errors), 53 MTK_ETHTOOL_STAT(rx_flow_control_packets), 54 }; 55 56 static const char * const mtk_clks_source_name[] = { 57 "ethif", "esw", "gp0", "gp1", "gp2", "trgpll", "sgmii_tx250m", 58 "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll" 59 }; 60 61 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) 62 { 63 __raw_writel(val, eth->base + reg); 64 } 65 66 u32 mtk_r32(struct mtk_eth *eth, unsigned reg) 67 { 68 return __raw_readl(eth->base + reg); 69 } 70 71 static int mtk_mdio_busy_wait(struct mtk_eth *eth) 72 { 73 unsigned long t_start = jiffies; 74 75 while (1) { 76 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS)) 77 return 0; 78 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT)) 79 break; 80 usleep_range(10, 20); 81 } 82 83 dev_err(eth->dev, "mdio: MDIO timeout\n"); 84 return -1; 85 } 86 87 static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, 88 u32 phy_register, u32 write_data) 89 { 90 if (mtk_mdio_busy_wait(eth)) 91 return -1; 92 93 write_data &= 0xffff; 94 95 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE | 96 (phy_register << PHY_IAC_REG_SHIFT) | 97 (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data, 98 MTK_PHY_IAC); 99 100 if (mtk_mdio_busy_wait(eth)) 101 return -1; 102 103 return 0; 104 } 105 106 static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg) 107 { 108 u32 d; 109 110 if (mtk_mdio_busy_wait(eth)) 111 return 0xffff; 112 113 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ | 114 (phy_reg << PHY_IAC_REG_SHIFT) | 115 (phy_addr << PHY_IAC_ADDR_SHIFT), 116 MTK_PHY_IAC); 117 118 if (mtk_mdio_busy_wait(eth)) 119 return 0xffff; 120 121 d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff; 122 123 return d; 124 } 125 126 static int mtk_mdio_write(struct mii_bus *bus, int phy_addr, 127 int phy_reg, u16 val) 128 { 129 struct mtk_eth *eth = bus->priv; 130 131 return _mtk_mdio_write(eth, phy_addr, phy_reg, val); 132 } 133 134 static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) 135 { 136 struct mtk_eth *eth = bus->priv; 137 138 return _mtk_mdio_read(eth, phy_addr, phy_reg); 139 } 140 141 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed) 142 { 143 u32 val; 144 int ret; 145 146 val = (speed == SPEED_1000) ? 147 INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100; 148 mtk_w32(eth, val, INTF_MODE); 149 150 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, 151 ETHSYS_TRGMII_CLK_SEL362_5, 152 ETHSYS_TRGMII_CLK_SEL362_5); 153 154 val = (speed == SPEED_1000) ? 250000000 : 500000000; 155 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); 156 if (ret) 157 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); 158 159 val = (speed == SPEED_1000) ? 160 RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100; 161 mtk_w32(eth, val, TRGMII_RCK_CTRL); 162 163 val = (speed == SPEED_1000) ? 164 TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100; 165 mtk_w32(eth, val, TRGMII_TCK_CTRL); 166 } 167 168 static void mtk_gmac_sgmii_hw_setup(struct mtk_eth *eth, int mac_id) 169 { 170 u32 val; 171 172 /* Setup the link timer and QPHY power up inside SGMIISYS */ 173 regmap_write(eth->sgmiisys, SGMSYS_PCS_LINK_TIMER, 174 SGMII_LINK_TIMER_DEFAULT); 175 176 regmap_read(eth->sgmiisys, SGMSYS_SGMII_MODE, &val); 177 val |= SGMII_REMOTE_FAULT_DIS; 178 regmap_write(eth->sgmiisys, SGMSYS_SGMII_MODE, val); 179 180 regmap_read(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, &val); 181 val |= SGMII_AN_RESTART; 182 regmap_write(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, val); 183 184 regmap_read(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, &val); 185 val &= ~SGMII_PHYA_PWD; 186 regmap_write(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, val); 187 188 /* Determine MUX for which GMAC uses the SGMII interface */ 189 if (MTK_HAS_CAPS(eth->soc->caps, MTK_DUAL_GMAC_SHARED_SGMII)) { 190 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); 191 val &= ~SYSCFG0_SGMII_MASK; 192 val |= !mac_id ? SYSCFG0_SGMII_GMAC1 : SYSCFG0_SGMII_GMAC2; 193 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); 194 195 dev_info(eth->dev, "setup shared sgmii for gmac=%d\n", 196 mac_id); 197 } 198 199 /* Setup the GMAC1 going through SGMII path when SoC also support 200 * ESW on GMAC1 201 */ 202 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_ESW | MTK_GMAC1_SGMII) && 203 !mac_id) { 204 mtk_w32(eth, 0, MTK_MAC_MISC); 205 dev_info(eth->dev, "setup gmac1 going through sgmii"); 206 } 207 } 208 209 static void mtk_phy_link_adjust(struct net_device *dev) 210 { 211 struct mtk_mac *mac = netdev_priv(dev); 212 u16 lcl_adv = 0, rmt_adv = 0; 213 u8 flowctrl; 214 u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | 215 MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | 216 MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | 217 MAC_MCR_BACKPR_EN; 218 219 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) 220 return; 221 222 switch (dev->phydev->speed) { 223 case SPEED_1000: 224 mcr |= MAC_MCR_SPEED_1000; 225 break; 226 case SPEED_100: 227 mcr |= MAC_MCR_SPEED_100; 228 break; 229 }; 230 231 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) && 232 !mac->id && !mac->trgmii) 233 mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed); 234 235 if (dev->phydev->link) 236 mcr |= MAC_MCR_FORCE_LINK; 237 238 if (dev->phydev->duplex) { 239 mcr |= MAC_MCR_FORCE_DPX; 240 241 if (dev->phydev->pause) 242 rmt_adv = LPA_PAUSE_CAP; 243 if (dev->phydev->asym_pause) 244 rmt_adv |= LPA_PAUSE_ASYM; 245 246 if (dev->phydev->advertising & ADVERTISED_Pause) 247 lcl_adv |= ADVERTISE_PAUSE_CAP; 248 if (dev->phydev->advertising & ADVERTISED_Asym_Pause) 249 lcl_adv |= ADVERTISE_PAUSE_ASYM; 250 251 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); 252 253 if (flowctrl & FLOW_CTRL_TX) 254 mcr |= MAC_MCR_FORCE_TX_FC; 255 if (flowctrl & FLOW_CTRL_RX) 256 mcr |= MAC_MCR_FORCE_RX_FC; 257 258 netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n", 259 flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled", 260 flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled"); 261 } 262 263 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); 264 265 if (dev->phydev->link) 266 netif_carrier_on(dev); 267 else 268 netif_carrier_off(dev); 269 270 if (!of_phy_is_fixed_link(mac->of_node)) 271 phy_print_status(dev->phydev); 272 } 273 274 static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac, 275 struct device_node *phy_node) 276 { 277 struct phy_device *phydev; 278 int phy_mode; 279 280 phy_mode = of_get_phy_mode(phy_node); 281 if (phy_mode < 0) { 282 dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode); 283 return -EINVAL; 284 } 285 286 phydev = of_phy_connect(eth->netdev[mac->id], phy_node, 287 mtk_phy_link_adjust, 0, phy_mode); 288 if (!phydev) { 289 dev_err(eth->dev, "could not connect to PHY\n"); 290 return -ENODEV; 291 } 292 293 dev_info(eth->dev, 294 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n", 295 mac->id, phydev_name(phydev), phydev->phy_id, 296 phydev->drv->name); 297 298 return 0; 299 } 300 301 static int mtk_phy_connect(struct net_device *dev) 302 { 303 struct mtk_mac *mac = netdev_priv(dev); 304 struct mtk_eth *eth; 305 struct device_node *np; 306 u32 val; 307 308 eth = mac->hw; 309 np = of_parse_phandle(mac->of_node, "phy-handle", 0); 310 if (!np && of_phy_is_fixed_link(mac->of_node)) 311 if (!of_phy_register_fixed_link(mac->of_node)) 312 np = of_node_get(mac->of_node); 313 if (!np) 314 return -ENODEV; 315 316 mac->ge_mode = 0; 317 switch (of_get_phy_mode(np)) { 318 case PHY_INTERFACE_MODE_TRGMII: 319 mac->trgmii = true; 320 case PHY_INTERFACE_MODE_RGMII_TXID: 321 case PHY_INTERFACE_MODE_RGMII_RXID: 322 case PHY_INTERFACE_MODE_RGMII_ID: 323 case PHY_INTERFACE_MODE_RGMII: 324 break; 325 case PHY_INTERFACE_MODE_SGMII: 326 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) 327 mtk_gmac_sgmii_hw_setup(eth, mac->id); 328 break; 329 case PHY_INTERFACE_MODE_MII: 330 mac->ge_mode = 1; 331 break; 332 case PHY_INTERFACE_MODE_REVMII: 333 mac->ge_mode = 2; 334 break; 335 case PHY_INTERFACE_MODE_RMII: 336 if (!mac->id) 337 goto err_phy; 338 mac->ge_mode = 3; 339 break; 340 default: 341 goto err_phy; 342 } 343 344 /* put the gmac into the right mode */ 345 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); 346 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id); 347 val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id); 348 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); 349 350 /* couple phydev to net_device */ 351 if (mtk_phy_connect_node(eth, mac, np)) 352 goto err_phy; 353 354 dev->phydev->autoneg = AUTONEG_ENABLE; 355 dev->phydev->speed = 0; 356 dev->phydev->duplex = 0; 357 358 if (of_phy_is_fixed_link(mac->of_node)) 359 dev->phydev->supported |= 360 SUPPORTED_Pause | SUPPORTED_Asym_Pause; 361 362 dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | 363 SUPPORTED_Asym_Pause; 364 dev->phydev->advertising = dev->phydev->supported | 365 ADVERTISED_Autoneg; 366 phy_start_aneg(dev->phydev); 367 368 of_node_put(np); 369 370 return 0; 371 372 err_phy: 373 if (of_phy_is_fixed_link(mac->of_node)) 374 of_phy_deregister_fixed_link(mac->of_node); 375 of_node_put(np); 376 dev_err(eth->dev, "%s: invalid phy\n", __func__); 377 return -EINVAL; 378 } 379 380 static int mtk_mdio_init(struct mtk_eth *eth) 381 { 382 struct device_node *mii_np; 383 int ret; 384 385 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); 386 if (!mii_np) { 387 dev_err(eth->dev, "no %s child node found", "mdio-bus"); 388 return -ENODEV; 389 } 390 391 if (!of_device_is_available(mii_np)) { 392 ret = -ENODEV; 393 goto err_put_node; 394 } 395 396 eth->mii_bus = devm_mdiobus_alloc(eth->dev); 397 if (!eth->mii_bus) { 398 ret = -ENOMEM; 399 goto err_put_node; 400 } 401 402 eth->mii_bus->name = "mdio"; 403 eth->mii_bus->read = mtk_mdio_read; 404 eth->mii_bus->write = mtk_mdio_write; 405 eth->mii_bus->priv = eth; 406 eth->mii_bus->parent = eth->dev; 407 408 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name); 409 ret = of_mdiobus_register(eth->mii_bus, mii_np); 410 411 err_put_node: 412 of_node_put(mii_np); 413 return ret; 414 } 415 416 static void mtk_mdio_cleanup(struct mtk_eth *eth) 417 { 418 if (!eth->mii_bus) 419 return; 420 421 mdiobus_unregister(eth->mii_bus); 422 } 423 424 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask) 425 { 426 unsigned long flags; 427 u32 val; 428 429 spin_lock_irqsave(ð->tx_irq_lock, flags); 430 val = mtk_r32(eth, MTK_QDMA_INT_MASK); 431 mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK); 432 spin_unlock_irqrestore(ð->tx_irq_lock, flags); 433 } 434 435 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask) 436 { 437 unsigned long flags; 438 u32 val; 439 440 spin_lock_irqsave(ð->tx_irq_lock, flags); 441 val = mtk_r32(eth, MTK_QDMA_INT_MASK); 442 mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK); 443 spin_unlock_irqrestore(ð->tx_irq_lock, flags); 444 } 445 446 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask) 447 { 448 unsigned long flags; 449 u32 val; 450 451 spin_lock_irqsave(ð->rx_irq_lock, flags); 452 val = mtk_r32(eth, MTK_PDMA_INT_MASK); 453 mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK); 454 spin_unlock_irqrestore(ð->rx_irq_lock, flags); 455 } 456 457 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask) 458 { 459 unsigned long flags; 460 u32 val; 461 462 spin_lock_irqsave(ð->rx_irq_lock, flags); 463 val = mtk_r32(eth, MTK_PDMA_INT_MASK); 464 mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK); 465 spin_unlock_irqrestore(ð->rx_irq_lock, flags); 466 } 467 468 static int mtk_set_mac_address(struct net_device *dev, void *p) 469 { 470 int ret = eth_mac_addr(dev, p); 471 struct mtk_mac *mac = netdev_priv(dev); 472 const char *macaddr = dev->dev_addr; 473 474 if (ret) 475 return ret; 476 477 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) 478 return -EBUSY; 479 480 spin_lock_bh(&mac->hw->page_lock); 481 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], 482 MTK_GDMA_MAC_ADRH(mac->id)); 483 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | 484 (macaddr[4] << 8) | macaddr[5], 485 MTK_GDMA_MAC_ADRL(mac->id)); 486 spin_unlock_bh(&mac->hw->page_lock); 487 488 return 0; 489 } 490 491 void mtk_stats_update_mac(struct mtk_mac *mac) 492 { 493 struct mtk_hw_stats *hw_stats = mac->hw_stats; 494 unsigned int base = MTK_GDM1_TX_GBCNT; 495 u64 stats; 496 497 base += hw_stats->reg_offset; 498 499 u64_stats_update_begin(&hw_stats->syncp); 500 501 hw_stats->rx_bytes += mtk_r32(mac->hw, base); 502 stats = mtk_r32(mac->hw, base + 0x04); 503 if (stats) 504 hw_stats->rx_bytes += (stats << 32); 505 hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08); 506 hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10); 507 hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14); 508 hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18); 509 hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c); 510 hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20); 511 hw_stats->rx_flow_control_packets += 512 mtk_r32(mac->hw, base + 0x24); 513 hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28); 514 hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c); 515 hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30); 516 stats = mtk_r32(mac->hw, base + 0x34); 517 if (stats) 518 hw_stats->tx_bytes += (stats << 32); 519 hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38); 520 u64_stats_update_end(&hw_stats->syncp); 521 } 522 523 static void mtk_stats_update(struct mtk_eth *eth) 524 { 525 int i; 526 527 for (i = 0; i < MTK_MAC_COUNT; i++) { 528 if (!eth->mac[i] || !eth->mac[i]->hw_stats) 529 continue; 530 if (spin_trylock(ð->mac[i]->hw_stats->stats_lock)) { 531 mtk_stats_update_mac(eth->mac[i]); 532 spin_unlock(ð->mac[i]->hw_stats->stats_lock); 533 } 534 } 535 } 536 537 static void mtk_get_stats64(struct net_device *dev, 538 struct rtnl_link_stats64 *storage) 539 { 540 struct mtk_mac *mac = netdev_priv(dev); 541 struct mtk_hw_stats *hw_stats = mac->hw_stats; 542 unsigned int start; 543 544 if (netif_running(dev) && netif_device_present(dev)) { 545 if (spin_trylock_bh(&hw_stats->stats_lock)) { 546 mtk_stats_update_mac(mac); 547 spin_unlock_bh(&hw_stats->stats_lock); 548 } 549 } 550 551 do { 552 start = u64_stats_fetch_begin_irq(&hw_stats->syncp); 553 storage->rx_packets = hw_stats->rx_packets; 554 storage->tx_packets = hw_stats->tx_packets; 555 storage->rx_bytes = hw_stats->rx_bytes; 556 storage->tx_bytes = hw_stats->tx_bytes; 557 storage->collisions = hw_stats->tx_collisions; 558 storage->rx_length_errors = hw_stats->rx_short_errors + 559 hw_stats->rx_long_errors; 560 storage->rx_over_errors = hw_stats->rx_overflow; 561 storage->rx_crc_errors = hw_stats->rx_fcs_errors; 562 storage->rx_errors = hw_stats->rx_checksum_errors; 563 storage->tx_aborted_errors = hw_stats->tx_skip; 564 } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start)); 565 566 storage->tx_errors = dev->stats.tx_errors; 567 storage->rx_dropped = dev->stats.rx_dropped; 568 storage->tx_dropped = dev->stats.tx_dropped; 569 } 570 571 static inline int mtk_max_frag_size(int mtu) 572 { 573 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */ 574 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH) 575 mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; 576 577 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) + 578 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 579 } 580 581 static inline int mtk_max_buf_size(int frag_size) 582 { 583 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN - 584 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 585 586 WARN_ON(buf_size < MTK_MAX_RX_LENGTH); 587 588 return buf_size; 589 } 590 591 static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd, 592 struct mtk_rx_dma *dma_rxd) 593 { 594 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); 595 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2); 596 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); 597 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); 598 } 599 600 /* the qdma core needs scratch memory to be setup */ 601 static int mtk_init_fq_dma(struct mtk_eth *eth) 602 { 603 dma_addr_t phy_ring_tail; 604 int cnt = MTK_DMA_SIZE; 605 dma_addr_t dma_addr; 606 int i; 607 608 eth->scratch_ring = dma_alloc_coherent(eth->dev, 609 cnt * sizeof(struct mtk_tx_dma), 610 ð->phy_scratch_ring, 611 GFP_ATOMIC | __GFP_ZERO); 612 if (unlikely(!eth->scratch_ring)) 613 return -ENOMEM; 614 615 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, 616 GFP_KERNEL); 617 if (unlikely(!eth->scratch_head)) 618 return -ENOMEM; 619 620 dma_addr = dma_map_single(eth->dev, 621 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, 622 DMA_FROM_DEVICE); 623 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) 624 return -ENOMEM; 625 626 memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt); 627 phy_ring_tail = eth->phy_scratch_ring + 628 (sizeof(struct mtk_tx_dma) * (cnt - 1)); 629 630 for (i = 0; i < cnt; i++) { 631 eth->scratch_ring[i].txd1 = 632 (dma_addr + (i * MTK_QDMA_PAGE_SIZE)); 633 if (i < cnt - 1) 634 eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring + 635 ((i + 1) * sizeof(struct mtk_tx_dma))); 636 eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE); 637 } 638 639 mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD); 640 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL); 641 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT); 642 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN); 643 644 return 0; 645 } 646 647 static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc) 648 { 649 void *ret = ring->dma; 650 651 return ret + (desc - ring->phys); 652 } 653 654 static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, 655 struct mtk_tx_dma *txd) 656 { 657 int idx = txd - ring->dma; 658 659 return &ring->buf[idx]; 660 } 661 662 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf) 663 { 664 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { 665 dma_unmap_single(eth->dev, 666 dma_unmap_addr(tx_buf, dma_addr0), 667 dma_unmap_len(tx_buf, dma_len0), 668 DMA_TO_DEVICE); 669 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { 670 dma_unmap_page(eth->dev, 671 dma_unmap_addr(tx_buf, dma_addr0), 672 dma_unmap_len(tx_buf, dma_len0), 673 DMA_TO_DEVICE); 674 } 675 tx_buf->flags = 0; 676 if (tx_buf->skb && 677 (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) 678 dev_kfree_skb_any(tx_buf->skb); 679 tx_buf->skb = NULL; 680 } 681 682 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, 683 int tx_num, struct mtk_tx_ring *ring, bool gso) 684 { 685 struct mtk_mac *mac = netdev_priv(dev); 686 struct mtk_eth *eth = mac->hw; 687 struct mtk_tx_dma *itxd, *txd; 688 struct mtk_tx_buf *itx_buf, *tx_buf; 689 dma_addr_t mapped_addr; 690 unsigned int nr_frags; 691 int i, n_desc = 1; 692 u32 txd4 = 0, fport; 693 694 itxd = ring->next_free; 695 if (itxd == ring->last_free) 696 return -ENOMEM; 697 698 /* set the forward port */ 699 fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT; 700 txd4 |= fport; 701 702 itx_buf = mtk_desc_to_tx_buf(ring, itxd); 703 memset(itx_buf, 0, sizeof(*itx_buf)); 704 705 if (gso) 706 txd4 |= TX_DMA_TSO; 707 708 /* TX Checksum offload */ 709 if (skb->ip_summed == CHECKSUM_PARTIAL) 710 txd4 |= TX_DMA_CHKSUM; 711 712 /* VLAN header offload */ 713 if (skb_vlan_tag_present(skb)) 714 txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); 715 716 mapped_addr = dma_map_single(eth->dev, skb->data, 717 skb_headlen(skb), DMA_TO_DEVICE); 718 if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) 719 return -ENOMEM; 720 721 WRITE_ONCE(itxd->txd1, mapped_addr); 722 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0; 723 itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 : 724 MTK_TX_FLAGS_FPORT1; 725 dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr); 726 dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb)); 727 728 /* TX SG offload */ 729 txd = itxd; 730 nr_frags = skb_shinfo(skb)->nr_frags; 731 for (i = 0; i < nr_frags; i++) { 732 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 733 unsigned int offset = 0; 734 int frag_size = skb_frag_size(frag); 735 736 while (frag_size) { 737 bool last_frag = false; 738 unsigned int frag_map_size; 739 740 txd = mtk_qdma_phys_to_virt(ring, txd->txd2); 741 if (txd == ring->last_free) 742 goto err_dma; 743 744 n_desc++; 745 frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); 746 mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, 747 frag_map_size, 748 DMA_TO_DEVICE); 749 if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) 750 goto err_dma; 751 752 if (i == nr_frags - 1 && 753 (frag_size - frag_map_size) == 0) 754 last_frag = true; 755 756 WRITE_ONCE(txd->txd1, mapped_addr); 757 WRITE_ONCE(txd->txd3, (TX_DMA_SWC | 758 TX_DMA_PLEN0(frag_map_size) | 759 last_frag * TX_DMA_LS0)); 760 WRITE_ONCE(txd->txd4, fport); 761 762 tx_buf = mtk_desc_to_tx_buf(ring, txd); 763 memset(tx_buf, 0, sizeof(*tx_buf)); 764 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; 765 tx_buf->flags |= MTK_TX_FLAGS_PAGE0; 766 tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 : 767 MTK_TX_FLAGS_FPORT1; 768 769 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); 770 dma_unmap_len_set(tx_buf, dma_len0, frag_map_size); 771 frag_size -= frag_map_size; 772 offset += frag_map_size; 773 } 774 } 775 776 /* store skb to cleanup */ 777 itx_buf->skb = skb; 778 779 WRITE_ONCE(itxd->txd4, txd4); 780 WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) | 781 (!nr_frags * TX_DMA_LS0))); 782 783 netdev_sent_queue(dev, skb->len); 784 skb_tx_timestamp(skb); 785 786 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2); 787 atomic_sub(n_desc, &ring->free_count); 788 789 /* make sure that all changes to the dma ring are flushed before we 790 * continue 791 */ 792 wmb(); 793 794 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more) 795 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); 796 797 return 0; 798 799 err_dma: 800 do { 801 tx_buf = mtk_desc_to_tx_buf(ring, itxd); 802 803 /* unmap dma */ 804 mtk_tx_unmap(eth, tx_buf); 805 806 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; 807 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); 808 } while (itxd != txd); 809 810 return -ENOMEM; 811 } 812 813 static inline int mtk_cal_txd_req(struct sk_buff *skb) 814 { 815 int i, nfrags; 816 struct skb_frag_struct *frag; 817 818 nfrags = 1; 819 if (skb_is_gso(skb)) { 820 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 821 frag = &skb_shinfo(skb)->frags[i]; 822 nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN); 823 } 824 } else { 825 nfrags += skb_shinfo(skb)->nr_frags; 826 } 827 828 return nfrags; 829 } 830 831 static int mtk_queue_stopped(struct mtk_eth *eth) 832 { 833 int i; 834 835 for (i = 0; i < MTK_MAC_COUNT; i++) { 836 if (!eth->netdev[i]) 837 continue; 838 if (netif_queue_stopped(eth->netdev[i])) 839 return 1; 840 } 841 842 return 0; 843 } 844 845 static void mtk_wake_queue(struct mtk_eth *eth) 846 { 847 int i; 848 849 for (i = 0; i < MTK_MAC_COUNT; i++) { 850 if (!eth->netdev[i]) 851 continue; 852 netif_wake_queue(eth->netdev[i]); 853 } 854 } 855 856 static void mtk_stop_queue(struct mtk_eth *eth) 857 { 858 int i; 859 860 for (i = 0; i < MTK_MAC_COUNT; i++) { 861 if (!eth->netdev[i]) 862 continue; 863 netif_stop_queue(eth->netdev[i]); 864 } 865 } 866 867 static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) 868 { 869 struct mtk_mac *mac = netdev_priv(dev); 870 struct mtk_eth *eth = mac->hw; 871 struct mtk_tx_ring *ring = ð->tx_ring; 872 struct net_device_stats *stats = &dev->stats; 873 bool gso = false; 874 int tx_num; 875 876 /* normally we can rely on the stack not calling this more than once, 877 * however we have 2 queues running on the same ring so we need to lock 878 * the ring access 879 */ 880 spin_lock(ð->page_lock); 881 882 if (unlikely(test_bit(MTK_RESETTING, ð->state))) 883 goto drop; 884 885 tx_num = mtk_cal_txd_req(skb); 886 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { 887 mtk_stop_queue(eth); 888 netif_err(eth, tx_queued, dev, 889 "Tx Ring full when queue awake!\n"); 890 spin_unlock(ð->page_lock); 891 return NETDEV_TX_BUSY; 892 } 893 894 /* TSO: fill MSS info in tcp checksum field */ 895 if (skb_is_gso(skb)) { 896 if (skb_cow_head(skb, 0)) { 897 netif_warn(eth, tx_err, dev, 898 "GSO expand head fail.\n"); 899 goto drop; 900 } 901 902 if (skb_shinfo(skb)->gso_type & 903 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { 904 gso = true; 905 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size); 906 } 907 } 908 909 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0) 910 goto drop; 911 912 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) 913 mtk_stop_queue(eth); 914 915 spin_unlock(ð->page_lock); 916 917 return NETDEV_TX_OK; 918 919 drop: 920 spin_unlock(ð->page_lock); 921 stats->tx_dropped++; 922 dev_kfree_skb_any(skb); 923 return NETDEV_TX_OK; 924 } 925 926 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) 927 { 928 int i; 929 struct mtk_rx_ring *ring; 930 int idx; 931 932 if (!eth->hwlro) 933 return ð->rx_ring[0]; 934 935 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) { 936 ring = ð->rx_ring[i]; 937 idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size); 938 if (ring->dma[idx].rxd2 & RX_DMA_DONE) { 939 ring->calc_idx_update = true; 940 return ring; 941 } 942 } 943 944 return NULL; 945 } 946 947 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth) 948 { 949 struct mtk_rx_ring *ring; 950 int i; 951 952 if (!eth->hwlro) { 953 ring = ð->rx_ring[0]; 954 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); 955 } else { 956 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) { 957 ring = ð->rx_ring[i]; 958 if (ring->calc_idx_update) { 959 ring->calc_idx_update = false; 960 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); 961 } 962 } 963 } 964 } 965 966 static int mtk_poll_rx(struct napi_struct *napi, int budget, 967 struct mtk_eth *eth) 968 { 969 struct mtk_rx_ring *ring; 970 int idx; 971 struct sk_buff *skb; 972 u8 *data, *new_data; 973 struct mtk_rx_dma *rxd, trxd; 974 int done = 0; 975 976 while (done < budget) { 977 struct net_device *netdev; 978 unsigned int pktlen; 979 dma_addr_t dma_addr; 980 int mac = 0; 981 982 ring = mtk_get_rx_ring(eth); 983 if (unlikely(!ring)) 984 goto rx_done; 985 986 idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size); 987 rxd = &ring->dma[idx]; 988 data = ring->data[idx]; 989 990 mtk_rx_get_desc(&trxd, rxd); 991 if (!(trxd.rxd2 & RX_DMA_DONE)) 992 break; 993 994 /* find out which mac the packet come from. values start at 1 */ 995 mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & 996 RX_DMA_FPORT_MASK; 997 mac--; 998 999 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT || 1000 !eth->netdev[mac])) 1001 goto release_desc; 1002 1003 netdev = eth->netdev[mac]; 1004 1005 if (unlikely(test_bit(MTK_RESETTING, ð->state))) 1006 goto release_desc; 1007 1008 /* alloc new buffer */ 1009 new_data = napi_alloc_frag(ring->frag_size); 1010 if (unlikely(!new_data)) { 1011 netdev->stats.rx_dropped++; 1012 goto release_desc; 1013 } 1014 dma_addr = dma_map_single(eth->dev, 1015 new_data + NET_SKB_PAD, 1016 ring->buf_size, 1017 DMA_FROM_DEVICE); 1018 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { 1019 skb_free_frag(new_data); 1020 netdev->stats.rx_dropped++; 1021 goto release_desc; 1022 } 1023 1024 /* receive data */ 1025 skb = build_skb(data, ring->frag_size); 1026 if (unlikely(!skb)) { 1027 skb_free_frag(new_data); 1028 netdev->stats.rx_dropped++; 1029 goto release_desc; 1030 } 1031 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); 1032 1033 dma_unmap_single(eth->dev, trxd.rxd1, 1034 ring->buf_size, DMA_FROM_DEVICE); 1035 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); 1036 skb->dev = netdev; 1037 skb_put(skb, pktlen); 1038 if (trxd.rxd4 & RX_DMA_L4_VALID) 1039 skb->ip_summed = CHECKSUM_UNNECESSARY; 1040 else 1041 skb_checksum_none_assert(skb); 1042 skb->protocol = eth_type_trans(skb, netdev); 1043 1044 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && 1045 RX_DMA_VID(trxd.rxd3)) 1046 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 1047 RX_DMA_VID(trxd.rxd3)); 1048 skb_record_rx_queue(skb, 0); 1049 napi_gro_receive(napi, skb); 1050 1051 ring->data[idx] = new_data; 1052 rxd->rxd1 = (unsigned int)dma_addr; 1053 1054 release_desc: 1055 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size); 1056 1057 ring->calc_idx = idx; 1058 1059 done++; 1060 } 1061 1062 rx_done: 1063 if (done) { 1064 /* make sure that all changes to the dma ring are flushed before 1065 * we continue 1066 */ 1067 wmb(); 1068 mtk_update_rx_cpu_idx(eth); 1069 } 1070 1071 return done; 1072 } 1073 1074 static int mtk_poll_tx(struct mtk_eth *eth, int budget) 1075 { 1076 struct mtk_tx_ring *ring = ð->tx_ring; 1077 struct mtk_tx_dma *desc; 1078 struct sk_buff *skb; 1079 struct mtk_tx_buf *tx_buf; 1080 unsigned int done[MTK_MAX_DEVS]; 1081 unsigned int bytes[MTK_MAX_DEVS]; 1082 u32 cpu, dma; 1083 int total = 0, i; 1084 1085 memset(done, 0, sizeof(done)); 1086 memset(bytes, 0, sizeof(bytes)); 1087 1088 cpu = mtk_r32(eth, MTK_QTX_CRX_PTR); 1089 dma = mtk_r32(eth, MTK_QTX_DRX_PTR); 1090 1091 desc = mtk_qdma_phys_to_virt(ring, cpu); 1092 1093 while ((cpu != dma) && budget) { 1094 u32 next_cpu = desc->txd2; 1095 int mac = 0; 1096 1097 desc = mtk_qdma_phys_to_virt(ring, desc->txd2); 1098 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0) 1099 break; 1100 1101 tx_buf = mtk_desc_to_tx_buf(ring, desc); 1102 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1) 1103 mac = 1; 1104 1105 skb = tx_buf->skb; 1106 if (!skb) 1107 break; 1108 1109 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) { 1110 bytes[mac] += skb->len; 1111 done[mac]++; 1112 budget--; 1113 } 1114 mtk_tx_unmap(eth, tx_buf); 1115 1116 ring->last_free = desc; 1117 atomic_inc(&ring->free_count); 1118 1119 cpu = next_cpu; 1120 } 1121 1122 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR); 1123 1124 for (i = 0; i < MTK_MAC_COUNT; i++) { 1125 if (!eth->netdev[i] || !done[i]) 1126 continue; 1127 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]); 1128 total += done[i]; 1129 } 1130 1131 if (mtk_queue_stopped(eth) && 1132 (atomic_read(&ring->free_count) > ring->thresh)) 1133 mtk_wake_queue(eth); 1134 1135 return total; 1136 } 1137 1138 static void mtk_handle_status_irq(struct mtk_eth *eth) 1139 { 1140 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2); 1141 1142 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) { 1143 mtk_stats_update(eth); 1144 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF), 1145 MTK_INT_STATUS2); 1146 } 1147 } 1148 1149 static int mtk_napi_tx(struct napi_struct *napi, int budget) 1150 { 1151 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); 1152 u32 status, mask; 1153 int tx_done = 0; 1154 1155 mtk_handle_status_irq(eth); 1156 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS); 1157 tx_done = mtk_poll_tx(eth, budget); 1158 1159 if (unlikely(netif_msg_intr(eth))) { 1160 status = mtk_r32(eth, MTK_QMTK_INT_STATUS); 1161 mask = mtk_r32(eth, MTK_QDMA_INT_MASK); 1162 dev_info(eth->dev, 1163 "done tx %d, intr 0x%08x/0x%x\n", 1164 tx_done, status, mask); 1165 } 1166 1167 if (tx_done == budget) 1168 return budget; 1169 1170 status = mtk_r32(eth, MTK_QMTK_INT_STATUS); 1171 if (status & MTK_TX_DONE_INT) 1172 return budget; 1173 1174 napi_complete(napi); 1175 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); 1176 1177 return tx_done; 1178 } 1179 1180 static int mtk_napi_rx(struct napi_struct *napi, int budget) 1181 { 1182 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); 1183 u32 status, mask; 1184 int rx_done = 0; 1185 int remain_budget = budget; 1186 1187 mtk_handle_status_irq(eth); 1188 1189 poll_again: 1190 mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS); 1191 rx_done = mtk_poll_rx(napi, remain_budget, eth); 1192 1193 if (unlikely(netif_msg_intr(eth))) { 1194 status = mtk_r32(eth, MTK_PDMA_INT_STATUS); 1195 mask = mtk_r32(eth, MTK_PDMA_INT_MASK); 1196 dev_info(eth->dev, 1197 "done rx %d, intr 0x%08x/0x%x\n", 1198 rx_done, status, mask); 1199 } 1200 if (rx_done == remain_budget) 1201 return budget; 1202 1203 status = mtk_r32(eth, MTK_PDMA_INT_STATUS); 1204 if (status & MTK_RX_DONE_INT) { 1205 remain_budget -= rx_done; 1206 goto poll_again; 1207 } 1208 napi_complete(napi); 1209 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); 1210 1211 return rx_done + budget - remain_budget; 1212 } 1213 1214 static int mtk_tx_alloc(struct mtk_eth *eth) 1215 { 1216 struct mtk_tx_ring *ring = ð->tx_ring; 1217 int i, sz = sizeof(*ring->dma); 1218 1219 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf), 1220 GFP_KERNEL); 1221 if (!ring->buf) 1222 goto no_tx_mem; 1223 1224 ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz, 1225 &ring->phys, GFP_ATOMIC); 1226 if (!ring->dma) 1227 goto no_tx_mem; 1228 1229 for (i = 0; i < MTK_DMA_SIZE; i++) { 1230 int next = (i + 1) % MTK_DMA_SIZE; 1231 u32 next_ptr = ring->phys + next * sz; 1232 1233 ring->dma[i].txd2 = next_ptr; 1234 ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; 1235 } 1236 1237 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); 1238 ring->next_free = &ring->dma[0]; 1239 ring->last_free = &ring->dma[MTK_DMA_SIZE - 1]; 1240 ring->thresh = MAX_SKB_FRAGS; 1241 1242 /* make sure that all changes to the dma ring are flushed before we 1243 * continue 1244 */ 1245 wmb(); 1246 1247 mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR); 1248 mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR); 1249 mtk_w32(eth, 1250 ring->phys + ((MTK_DMA_SIZE - 1) * sz), 1251 MTK_QTX_CRX_PTR); 1252 mtk_w32(eth, 1253 ring->phys + ((MTK_DMA_SIZE - 1) * sz), 1254 MTK_QTX_DRX_PTR); 1255 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0)); 1256 1257 return 0; 1258 1259 no_tx_mem: 1260 return -ENOMEM; 1261 } 1262 1263 static void mtk_tx_clean(struct mtk_eth *eth) 1264 { 1265 struct mtk_tx_ring *ring = ð->tx_ring; 1266 int i; 1267 1268 if (ring->buf) { 1269 for (i = 0; i < MTK_DMA_SIZE; i++) 1270 mtk_tx_unmap(eth, &ring->buf[i]); 1271 kfree(ring->buf); 1272 ring->buf = NULL; 1273 } 1274 1275 if (ring->dma) { 1276 dma_free_coherent(eth->dev, 1277 MTK_DMA_SIZE * sizeof(*ring->dma), 1278 ring->dma, 1279 ring->phys); 1280 ring->dma = NULL; 1281 } 1282 } 1283 1284 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) 1285 { 1286 struct mtk_rx_ring *ring; 1287 int rx_data_len, rx_dma_size; 1288 int i; 1289 u32 offset = 0; 1290 1291 if (rx_flag == MTK_RX_FLAGS_QDMA) { 1292 if (ring_no) 1293 return -EINVAL; 1294 ring = ð->rx_ring_qdma; 1295 offset = 0x1000; 1296 } else { 1297 ring = ð->rx_ring[ring_no]; 1298 } 1299 1300 if (rx_flag == MTK_RX_FLAGS_HWLRO) { 1301 rx_data_len = MTK_MAX_LRO_RX_LENGTH; 1302 rx_dma_size = MTK_HW_LRO_DMA_SIZE; 1303 } else { 1304 rx_data_len = ETH_DATA_LEN; 1305 rx_dma_size = MTK_DMA_SIZE; 1306 } 1307 1308 ring->frag_size = mtk_max_frag_size(rx_data_len); 1309 ring->buf_size = mtk_max_buf_size(ring->frag_size); 1310 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data), 1311 GFP_KERNEL); 1312 if (!ring->data) 1313 return -ENOMEM; 1314 1315 for (i = 0; i < rx_dma_size; i++) { 1316 ring->data[i] = netdev_alloc_frag(ring->frag_size); 1317 if (!ring->data[i]) 1318 return -ENOMEM; 1319 } 1320 1321 ring->dma = dma_alloc_coherent(eth->dev, 1322 rx_dma_size * sizeof(*ring->dma), 1323 &ring->phys, 1324 GFP_ATOMIC | __GFP_ZERO); 1325 if (!ring->dma) 1326 return -ENOMEM; 1327 1328 for (i = 0; i < rx_dma_size; i++) { 1329 dma_addr_t dma_addr = dma_map_single(eth->dev, 1330 ring->data[i] + NET_SKB_PAD, 1331 ring->buf_size, 1332 DMA_FROM_DEVICE); 1333 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) 1334 return -ENOMEM; 1335 ring->dma[i].rxd1 = (unsigned int)dma_addr; 1336 1337 ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size); 1338 } 1339 ring->dma_size = rx_dma_size; 1340 ring->calc_idx_update = false; 1341 ring->calc_idx = rx_dma_size - 1; 1342 ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no); 1343 /* make sure that all changes to the dma ring are flushed before we 1344 * continue 1345 */ 1346 wmb(); 1347 1348 mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset); 1349 mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset); 1350 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset); 1351 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset); 1352 1353 return 0; 1354 } 1355 1356 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) 1357 { 1358 int i; 1359 1360 if (ring->data && ring->dma) { 1361 for (i = 0; i < ring->dma_size; i++) { 1362 if (!ring->data[i]) 1363 continue; 1364 if (!ring->dma[i].rxd1) 1365 continue; 1366 dma_unmap_single(eth->dev, 1367 ring->dma[i].rxd1, 1368 ring->buf_size, 1369 DMA_FROM_DEVICE); 1370 skb_free_frag(ring->data[i]); 1371 } 1372 kfree(ring->data); 1373 ring->data = NULL; 1374 } 1375 1376 if (ring->dma) { 1377 dma_free_coherent(eth->dev, 1378 ring->dma_size * sizeof(*ring->dma), 1379 ring->dma, 1380 ring->phys); 1381 ring->dma = NULL; 1382 } 1383 } 1384 1385 static int mtk_hwlro_rx_init(struct mtk_eth *eth) 1386 { 1387 int i; 1388 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0; 1389 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0; 1390 1391 /* set LRO rings to auto-learn modes */ 1392 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE; 1393 1394 /* validate LRO ring */ 1395 ring_ctrl_dw2 |= MTK_RING_VLD; 1396 1397 /* set AGE timer (unit: 20us) */ 1398 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H; 1399 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L; 1400 1401 /* set max AGG timer (unit: 20us) */ 1402 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME; 1403 1404 /* set max LRO AGG count */ 1405 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L; 1406 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H; 1407 1408 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) { 1409 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i)); 1410 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i)); 1411 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i)); 1412 } 1413 1414 /* IPv4 checksum update enable */ 1415 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN; 1416 1417 /* switch priority comparison to packet count mode */ 1418 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE; 1419 1420 /* bandwidth threshold setting */ 1421 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2); 1422 1423 /* auto-learn score delta setting */ 1424 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA); 1425 1426 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */ 1427 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME, 1428 MTK_PDMA_LRO_ALT_REFRESH_TIMER); 1429 1430 /* set HW LRO mode & the max aggregation count for rx packets */ 1431 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff); 1432 1433 /* the minimal remaining room of SDL0 in RXD for lro aggregation */ 1434 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL; 1435 1436 /* enable HW LRO */ 1437 lro_ctrl_dw0 |= MTK_LRO_EN; 1438 1439 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3); 1440 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0); 1441 1442 return 0; 1443 } 1444 1445 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth) 1446 { 1447 int i; 1448 u32 val; 1449 1450 /* relinquish lro rings, flush aggregated packets */ 1451 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0); 1452 1453 /* wait for relinquishments done */ 1454 for (i = 0; i < 10; i++) { 1455 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0); 1456 if (val & MTK_LRO_RING_RELINQUISH_DONE) { 1457 msleep(20); 1458 continue; 1459 } 1460 break; 1461 } 1462 1463 /* invalidate lro rings */ 1464 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) 1465 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i)); 1466 1467 /* disable HW LRO */ 1468 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0); 1469 } 1470 1471 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip) 1472 { 1473 u32 reg_val; 1474 1475 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); 1476 1477 /* invalidate the IP setting */ 1478 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); 1479 1480 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx)); 1481 1482 /* validate the IP setting */ 1483 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); 1484 } 1485 1486 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx) 1487 { 1488 u32 reg_val; 1489 1490 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); 1491 1492 /* invalidate the IP setting */ 1493 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); 1494 1495 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx)); 1496 } 1497 1498 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac) 1499 { 1500 int cnt = 0; 1501 int i; 1502 1503 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { 1504 if (mac->hwlro_ip[i]) 1505 cnt++; 1506 } 1507 1508 return cnt; 1509 } 1510 1511 static int mtk_hwlro_add_ipaddr(struct net_device *dev, 1512 struct ethtool_rxnfc *cmd) 1513 { 1514 struct ethtool_rx_flow_spec *fsp = 1515 (struct ethtool_rx_flow_spec *)&cmd->fs; 1516 struct mtk_mac *mac = netdev_priv(dev); 1517 struct mtk_eth *eth = mac->hw; 1518 int hwlro_idx; 1519 1520 if ((fsp->flow_type != TCP_V4_FLOW) || 1521 (!fsp->h_u.tcp_ip4_spec.ip4dst) || 1522 (fsp->location > 1)) 1523 return -EINVAL; 1524 1525 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst); 1526 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location; 1527 1528 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac); 1529 1530 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]); 1531 1532 return 0; 1533 } 1534 1535 static int mtk_hwlro_del_ipaddr(struct net_device *dev, 1536 struct ethtool_rxnfc *cmd) 1537 { 1538 struct ethtool_rx_flow_spec *fsp = 1539 (struct ethtool_rx_flow_spec *)&cmd->fs; 1540 struct mtk_mac *mac = netdev_priv(dev); 1541 struct mtk_eth *eth = mac->hw; 1542 int hwlro_idx; 1543 1544 if (fsp->location > 1) 1545 return -EINVAL; 1546 1547 mac->hwlro_ip[fsp->location] = 0; 1548 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location; 1549 1550 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac); 1551 1552 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); 1553 1554 return 0; 1555 } 1556 1557 static void mtk_hwlro_netdev_disable(struct net_device *dev) 1558 { 1559 struct mtk_mac *mac = netdev_priv(dev); 1560 struct mtk_eth *eth = mac->hw; 1561 int i, hwlro_idx; 1562 1563 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { 1564 mac->hwlro_ip[i] = 0; 1565 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i; 1566 1567 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); 1568 } 1569 1570 mac->hwlro_ip_cnt = 0; 1571 } 1572 1573 static int mtk_hwlro_get_fdir_entry(struct net_device *dev, 1574 struct ethtool_rxnfc *cmd) 1575 { 1576 struct mtk_mac *mac = netdev_priv(dev); 1577 struct ethtool_rx_flow_spec *fsp = 1578 (struct ethtool_rx_flow_spec *)&cmd->fs; 1579 1580 /* only tcp dst ipv4 is meaningful, others are meaningless */ 1581 fsp->flow_type = TCP_V4_FLOW; 1582 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]); 1583 fsp->m_u.tcp_ip4_spec.ip4dst = 0; 1584 1585 fsp->h_u.tcp_ip4_spec.ip4src = 0; 1586 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff; 1587 fsp->h_u.tcp_ip4_spec.psrc = 0; 1588 fsp->m_u.tcp_ip4_spec.psrc = 0xffff; 1589 fsp->h_u.tcp_ip4_spec.pdst = 0; 1590 fsp->m_u.tcp_ip4_spec.pdst = 0xffff; 1591 fsp->h_u.tcp_ip4_spec.tos = 0; 1592 fsp->m_u.tcp_ip4_spec.tos = 0xff; 1593 1594 return 0; 1595 } 1596 1597 static int mtk_hwlro_get_fdir_all(struct net_device *dev, 1598 struct ethtool_rxnfc *cmd, 1599 u32 *rule_locs) 1600 { 1601 struct mtk_mac *mac = netdev_priv(dev); 1602 int cnt = 0; 1603 int i; 1604 1605 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { 1606 if (mac->hwlro_ip[i]) { 1607 rule_locs[cnt] = i; 1608 cnt++; 1609 } 1610 } 1611 1612 cmd->rule_cnt = cnt; 1613 1614 return 0; 1615 } 1616 1617 static netdev_features_t mtk_fix_features(struct net_device *dev, 1618 netdev_features_t features) 1619 { 1620 if (!(features & NETIF_F_LRO)) { 1621 struct mtk_mac *mac = netdev_priv(dev); 1622 int ip_cnt = mtk_hwlro_get_ip_cnt(mac); 1623 1624 if (ip_cnt) { 1625 netdev_info(dev, "RX flow is programmed, LRO should keep on\n"); 1626 1627 features |= NETIF_F_LRO; 1628 } 1629 } 1630 1631 return features; 1632 } 1633 1634 static int mtk_set_features(struct net_device *dev, netdev_features_t features) 1635 { 1636 int err = 0; 1637 1638 if (!((dev->features ^ features) & NETIF_F_LRO)) 1639 return 0; 1640 1641 if (!(features & NETIF_F_LRO)) 1642 mtk_hwlro_netdev_disable(dev); 1643 1644 return err; 1645 } 1646 1647 /* wait for DMA to finish whatever it is doing before we start using it again */ 1648 static int mtk_dma_busy_wait(struct mtk_eth *eth) 1649 { 1650 unsigned long t_start = jiffies; 1651 1652 while (1) { 1653 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) & 1654 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY))) 1655 return 0; 1656 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT)) 1657 break; 1658 } 1659 1660 dev_err(eth->dev, "DMA init timeout\n"); 1661 return -1; 1662 } 1663 1664 static int mtk_dma_init(struct mtk_eth *eth) 1665 { 1666 int err; 1667 u32 i; 1668 1669 if (mtk_dma_busy_wait(eth)) 1670 return -EBUSY; 1671 1672 /* QDMA needs scratch memory for internal reordering of the 1673 * descriptors 1674 */ 1675 err = mtk_init_fq_dma(eth); 1676 if (err) 1677 return err; 1678 1679 err = mtk_tx_alloc(eth); 1680 if (err) 1681 return err; 1682 1683 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); 1684 if (err) 1685 return err; 1686 1687 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); 1688 if (err) 1689 return err; 1690 1691 if (eth->hwlro) { 1692 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) { 1693 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO); 1694 if (err) 1695 return err; 1696 } 1697 err = mtk_hwlro_rx_init(eth); 1698 if (err) 1699 return err; 1700 } 1701 1702 /* Enable random early drop and set drop threshold automatically */ 1703 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN, 1704 MTK_QDMA_FC_THRES); 1705 mtk_w32(eth, 0x0, MTK_QDMA_HRED2); 1706 1707 return 0; 1708 } 1709 1710 static void mtk_dma_free(struct mtk_eth *eth) 1711 { 1712 int i; 1713 1714 for (i = 0; i < MTK_MAC_COUNT; i++) 1715 if (eth->netdev[i]) 1716 netdev_reset_queue(eth->netdev[i]); 1717 if (eth->scratch_ring) { 1718 dma_free_coherent(eth->dev, 1719 MTK_DMA_SIZE * sizeof(struct mtk_tx_dma), 1720 eth->scratch_ring, 1721 eth->phy_scratch_ring); 1722 eth->scratch_ring = NULL; 1723 eth->phy_scratch_ring = 0; 1724 } 1725 mtk_tx_clean(eth); 1726 mtk_rx_clean(eth, ð->rx_ring[0]); 1727 mtk_rx_clean(eth, ð->rx_ring_qdma); 1728 1729 if (eth->hwlro) { 1730 mtk_hwlro_rx_uninit(eth); 1731 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) 1732 mtk_rx_clean(eth, ð->rx_ring[i]); 1733 } 1734 1735 kfree(eth->scratch_head); 1736 } 1737 1738 static void mtk_tx_timeout(struct net_device *dev) 1739 { 1740 struct mtk_mac *mac = netdev_priv(dev); 1741 struct mtk_eth *eth = mac->hw; 1742 1743 eth->netdev[mac->id]->stats.tx_errors++; 1744 netif_err(eth, tx_err, dev, 1745 "transmit timed out\n"); 1746 schedule_work(ð->pending_work); 1747 } 1748 1749 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth) 1750 { 1751 struct mtk_eth *eth = _eth; 1752 1753 if (likely(napi_schedule_prep(ð->rx_napi))) { 1754 __napi_schedule(ð->rx_napi); 1755 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); 1756 } 1757 1758 return IRQ_HANDLED; 1759 } 1760 1761 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) 1762 { 1763 struct mtk_eth *eth = _eth; 1764 1765 if (likely(napi_schedule_prep(ð->tx_napi))) { 1766 __napi_schedule(ð->tx_napi); 1767 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); 1768 } 1769 1770 return IRQ_HANDLED; 1771 } 1772 1773 #ifdef CONFIG_NET_POLL_CONTROLLER 1774 static void mtk_poll_controller(struct net_device *dev) 1775 { 1776 struct mtk_mac *mac = netdev_priv(dev); 1777 struct mtk_eth *eth = mac->hw; 1778 1779 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); 1780 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); 1781 mtk_handle_irq_rx(eth->irq[2], dev); 1782 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); 1783 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); 1784 } 1785 #endif 1786 1787 static int mtk_start_dma(struct mtk_eth *eth) 1788 { 1789 int err; 1790 1791 err = mtk_dma_init(eth); 1792 if (err) { 1793 mtk_dma_free(eth); 1794 return err; 1795 } 1796 1797 mtk_w32(eth, 1798 MTK_TX_WB_DDONE | MTK_TX_DMA_EN | 1799 MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO | 1800 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | 1801 MTK_RX_BT_32DWORDS, 1802 MTK_QDMA_GLO_CFG); 1803 1804 mtk_w32(eth, 1805 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | 1806 MTK_RX_BT_32DWORDS | MTK_MULTI_EN, 1807 MTK_PDMA_GLO_CFG); 1808 1809 return 0; 1810 } 1811 1812 static int mtk_open(struct net_device *dev) 1813 { 1814 struct mtk_mac *mac = netdev_priv(dev); 1815 struct mtk_eth *eth = mac->hw; 1816 1817 /* we run 2 netdevs on the same dma ring so we only bring it up once */ 1818 if (!refcount_read(ð->dma_refcnt)) { 1819 int err = mtk_start_dma(eth); 1820 1821 if (err) 1822 return err; 1823 1824 napi_enable(ð->tx_napi); 1825 napi_enable(ð->rx_napi); 1826 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); 1827 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); 1828 refcount_set(ð->dma_refcnt, 1); 1829 } 1830 else 1831 refcount_inc(ð->dma_refcnt); 1832 1833 phy_start(dev->phydev); 1834 netif_start_queue(dev); 1835 1836 return 0; 1837 } 1838 1839 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) 1840 { 1841 u32 val; 1842 int i; 1843 1844 /* stop the dma engine */ 1845 spin_lock_bh(ð->page_lock); 1846 val = mtk_r32(eth, glo_cfg); 1847 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), 1848 glo_cfg); 1849 spin_unlock_bh(ð->page_lock); 1850 1851 /* wait for dma stop */ 1852 for (i = 0; i < 10; i++) { 1853 val = mtk_r32(eth, glo_cfg); 1854 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) { 1855 msleep(20); 1856 continue; 1857 } 1858 break; 1859 } 1860 } 1861 1862 static int mtk_stop(struct net_device *dev) 1863 { 1864 struct mtk_mac *mac = netdev_priv(dev); 1865 struct mtk_eth *eth = mac->hw; 1866 1867 netif_tx_disable(dev); 1868 phy_stop(dev->phydev); 1869 1870 /* only shutdown DMA if this is the last user */ 1871 if (!refcount_dec_and_test(ð->dma_refcnt)) 1872 return 0; 1873 1874 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); 1875 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); 1876 napi_disable(ð->tx_napi); 1877 napi_disable(ð->rx_napi); 1878 1879 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); 1880 mtk_stop_dma(eth, MTK_PDMA_GLO_CFG); 1881 1882 mtk_dma_free(eth); 1883 1884 return 0; 1885 } 1886 1887 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) 1888 { 1889 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, 1890 reset_bits, 1891 reset_bits); 1892 1893 usleep_range(1000, 1100); 1894 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, 1895 reset_bits, 1896 ~reset_bits); 1897 mdelay(10); 1898 } 1899 1900 static void mtk_clk_disable(struct mtk_eth *eth) 1901 { 1902 int clk; 1903 1904 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--) 1905 clk_disable_unprepare(eth->clks[clk]); 1906 } 1907 1908 static int mtk_clk_enable(struct mtk_eth *eth) 1909 { 1910 int clk, ret; 1911 1912 for (clk = 0; clk < MTK_CLK_MAX ; clk++) { 1913 ret = clk_prepare_enable(eth->clks[clk]); 1914 if (ret) 1915 goto err_disable_clks; 1916 } 1917 1918 return 0; 1919 1920 err_disable_clks: 1921 while (--clk >= 0) 1922 clk_disable_unprepare(eth->clks[clk]); 1923 1924 return ret; 1925 } 1926 1927 static int mtk_hw_init(struct mtk_eth *eth) 1928 { 1929 int i, val, ret; 1930 1931 if (test_and_set_bit(MTK_HW_INIT, ð->state)) 1932 return 0; 1933 1934 pm_runtime_enable(eth->dev); 1935 pm_runtime_get_sync(eth->dev); 1936 1937 ret = mtk_clk_enable(eth); 1938 if (ret) 1939 goto err_disable_pm; 1940 1941 ethsys_reset(eth, RSTCTRL_FE); 1942 ethsys_reset(eth, RSTCTRL_PPE); 1943 1944 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); 1945 for (i = 0; i < MTK_MAC_COUNT; i++) { 1946 if (!eth->mac[i]) 1947 continue; 1948 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id); 1949 val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id); 1950 } 1951 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); 1952 1953 if (eth->pctl) { 1954 /* Set GE2 driving and slew rate */ 1955 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); 1956 1957 /* set GE2 TDSEL */ 1958 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); 1959 1960 /* set GE2 TUNE */ 1961 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); 1962 } 1963 1964 /* Set linkdown as the default for each GMAC. Its own MCR would be set 1965 * up with the more appropriate value when mtk_phy_link_adjust call is 1966 * being invoked. 1967 */ 1968 for (i = 0; i < MTK_MAC_COUNT; i++) 1969 mtk_w32(eth, 0, MTK_MAC_MCR(i)); 1970 1971 /* Indicates CDM to parse the MTK special tag from CPU 1972 * which also is working out for untag packets. 1973 */ 1974 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); 1975 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); 1976 1977 /* Enable RX VLan Offloading */ 1978 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); 1979 1980 /* enable interrupt delay for RX */ 1981 mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT); 1982 1983 /* disable delay and normal interrupt */ 1984 mtk_w32(eth, 0, MTK_QDMA_DELAY_INT); 1985 mtk_tx_irq_disable(eth, ~0); 1986 mtk_rx_irq_disable(eth, ~0); 1987 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); 1988 mtk_w32(eth, 0, MTK_RST_GL); 1989 1990 /* FE int grouping */ 1991 mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1); 1992 mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2); 1993 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1); 1994 mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2); 1995 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); 1996 1997 for (i = 0; i < 2; i++) { 1998 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); 1999 2000 /* setup the forward port to send frame to PDMA */ 2001 val &= ~0xffff; 2002 2003 /* Enable RX checksum */ 2004 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; 2005 2006 /* setup the mac dma */ 2007 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); 2008 } 2009 2010 return 0; 2011 2012 err_disable_pm: 2013 pm_runtime_put_sync(eth->dev); 2014 pm_runtime_disable(eth->dev); 2015 2016 return ret; 2017 } 2018 2019 static int mtk_hw_deinit(struct mtk_eth *eth) 2020 { 2021 if (!test_and_clear_bit(MTK_HW_INIT, ð->state)) 2022 return 0; 2023 2024 mtk_clk_disable(eth); 2025 2026 pm_runtime_put_sync(eth->dev); 2027 pm_runtime_disable(eth->dev); 2028 2029 return 0; 2030 } 2031 2032 static int __init mtk_init(struct net_device *dev) 2033 { 2034 struct mtk_mac *mac = netdev_priv(dev); 2035 struct mtk_eth *eth = mac->hw; 2036 const char *mac_addr; 2037 2038 mac_addr = of_get_mac_address(mac->of_node); 2039 if (mac_addr) 2040 ether_addr_copy(dev->dev_addr, mac_addr); 2041 2042 /* If the mac address is invalid, use random mac address */ 2043 if (!is_valid_ether_addr(dev->dev_addr)) { 2044 eth_hw_addr_random(dev); 2045 dev_err(eth->dev, "generated random MAC address %pM\n", 2046 dev->dev_addr); 2047 } 2048 2049 return mtk_phy_connect(dev); 2050 } 2051 2052 static void mtk_uninit(struct net_device *dev) 2053 { 2054 struct mtk_mac *mac = netdev_priv(dev); 2055 struct mtk_eth *eth = mac->hw; 2056 2057 phy_disconnect(dev->phydev); 2058 if (of_phy_is_fixed_link(mac->of_node)) 2059 of_phy_deregister_fixed_link(mac->of_node); 2060 mtk_tx_irq_disable(eth, ~0); 2061 mtk_rx_irq_disable(eth, ~0); 2062 } 2063 2064 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2065 { 2066 switch (cmd) { 2067 case SIOCGMIIPHY: 2068 case SIOCGMIIREG: 2069 case SIOCSMIIREG: 2070 return phy_mii_ioctl(dev->phydev, ifr, cmd); 2071 default: 2072 break; 2073 } 2074 2075 return -EOPNOTSUPP; 2076 } 2077 2078 static void mtk_pending_work(struct work_struct *work) 2079 { 2080 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); 2081 int err, i; 2082 unsigned long restart = 0; 2083 2084 rtnl_lock(); 2085 2086 dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__); 2087 2088 while (test_and_set_bit_lock(MTK_RESETTING, ð->state)) 2089 cpu_relax(); 2090 2091 dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__); 2092 /* stop all devices to make sure that dma is properly shut down */ 2093 for (i = 0; i < MTK_MAC_COUNT; i++) { 2094 if (!eth->netdev[i]) 2095 continue; 2096 mtk_stop(eth->netdev[i]); 2097 __set_bit(i, &restart); 2098 } 2099 dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__); 2100 2101 /* restart underlying hardware such as power, clock, pin mux 2102 * and the connected phy 2103 */ 2104 mtk_hw_deinit(eth); 2105 2106 if (eth->dev->pins) 2107 pinctrl_select_state(eth->dev->pins->p, 2108 eth->dev->pins->default_state); 2109 mtk_hw_init(eth); 2110 2111 for (i = 0; i < MTK_MAC_COUNT; i++) { 2112 if (!eth->mac[i] || 2113 of_phy_is_fixed_link(eth->mac[i]->of_node)) 2114 continue; 2115 err = phy_init_hw(eth->netdev[i]->phydev); 2116 if (err) 2117 dev_err(eth->dev, "%s: PHY init failed.\n", 2118 eth->netdev[i]->name); 2119 } 2120 2121 /* restart DMA and enable IRQs */ 2122 for (i = 0; i < MTK_MAC_COUNT; i++) { 2123 if (!test_bit(i, &restart)) 2124 continue; 2125 err = mtk_open(eth->netdev[i]); 2126 if (err) { 2127 netif_alert(eth, ifup, eth->netdev[i], 2128 "Driver up/down cycle failed, closing device.\n"); 2129 dev_close(eth->netdev[i]); 2130 } 2131 } 2132 2133 dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__); 2134 2135 clear_bit_unlock(MTK_RESETTING, ð->state); 2136 2137 rtnl_unlock(); 2138 } 2139 2140 static int mtk_free_dev(struct mtk_eth *eth) 2141 { 2142 int i; 2143 2144 for (i = 0; i < MTK_MAC_COUNT; i++) { 2145 if (!eth->netdev[i]) 2146 continue; 2147 free_netdev(eth->netdev[i]); 2148 } 2149 2150 return 0; 2151 } 2152 2153 static int mtk_unreg_dev(struct mtk_eth *eth) 2154 { 2155 int i; 2156 2157 for (i = 0; i < MTK_MAC_COUNT; i++) { 2158 if (!eth->netdev[i]) 2159 continue; 2160 unregister_netdev(eth->netdev[i]); 2161 } 2162 2163 return 0; 2164 } 2165 2166 static int mtk_cleanup(struct mtk_eth *eth) 2167 { 2168 mtk_unreg_dev(eth); 2169 mtk_free_dev(eth); 2170 cancel_work_sync(ð->pending_work); 2171 2172 return 0; 2173 } 2174 2175 static int mtk_get_link_ksettings(struct net_device *ndev, 2176 struct ethtool_link_ksettings *cmd) 2177 { 2178 struct mtk_mac *mac = netdev_priv(ndev); 2179 2180 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) 2181 return -EBUSY; 2182 2183 phy_ethtool_ksettings_get(ndev->phydev, cmd); 2184 2185 return 0; 2186 } 2187 2188 static int mtk_set_link_ksettings(struct net_device *ndev, 2189 const struct ethtool_link_ksettings *cmd) 2190 { 2191 struct mtk_mac *mac = netdev_priv(ndev); 2192 2193 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) 2194 return -EBUSY; 2195 2196 return phy_ethtool_ksettings_set(ndev->phydev, cmd); 2197 } 2198 2199 static void mtk_get_drvinfo(struct net_device *dev, 2200 struct ethtool_drvinfo *info) 2201 { 2202 struct mtk_mac *mac = netdev_priv(dev); 2203 2204 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver)); 2205 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info)); 2206 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats); 2207 } 2208 2209 static u32 mtk_get_msglevel(struct net_device *dev) 2210 { 2211 struct mtk_mac *mac = netdev_priv(dev); 2212 2213 return mac->hw->msg_enable; 2214 } 2215 2216 static void mtk_set_msglevel(struct net_device *dev, u32 value) 2217 { 2218 struct mtk_mac *mac = netdev_priv(dev); 2219 2220 mac->hw->msg_enable = value; 2221 } 2222 2223 static int mtk_nway_reset(struct net_device *dev) 2224 { 2225 struct mtk_mac *mac = netdev_priv(dev); 2226 2227 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) 2228 return -EBUSY; 2229 2230 return genphy_restart_aneg(dev->phydev); 2231 } 2232 2233 static u32 mtk_get_link(struct net_device *dev) 2234 { 2235 struct mtk_mac *mac = netdev_priv(dev); 2236 int err; 2237 2238 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) 2239 return -EBUSY; 2240 2241 err = genphy_update_link(dev->phydev); 2242 if (err) 2243 return ethtool_op_get_link(dev); 2244 2245 return dev->phydev->link; 2246 } 2247 2248 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data) 2249 { 2250 int i; 2251 2252 switch (stringset) { 2253 case ETH_SS_STATS: 2254 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) { 2255 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN); 2256 data += ETH_GSTRING_LEN; 2257 } 2258 break; 2259 } 2260 } 2261 2262 static int mtk_get_sset_count(struct net_device *dev, int sset) 2263 { 2264 switch (sset) { 2265 case ETH_SS_STATS: 2266 return ARRAY_SIZE(mtk_ethtool_stats); 2267 default: 2268 return -EOPNOTSUPP; 2269 } 2270 } 2271 2272 static void mtk_get_ethtool_stats(struct net_device *dev, 2273 struct ethtool_stats *stats, u64 *data) 2274 { 2275 struct mtk_mac *mac = netdev_priv(dev); 2276 struct mtk_hw_stats *hwstats = mac->hw_stats; 2277 u64 *data_src, *data_dst; 2278 unsigned int start; 2279 int i; 2280 2281 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) 2282 return; 2283 2284 if (netif_running(dev) && netif_device_present(dev)) { 2285 if (spin_trylock_bh(&hwstats->stats_lock)) { 2286 mtk_stats_update_mac(mac); 2287 spin_unlock_bh(&hwstats->stats_lock); 2288 } 2289 } 2290 2291 data_src = (u64 *)hwstats; 2292 2293 do { 2294 data_dst = data; 2295 start = u64_stats_fetch_begin_irq(&hwstats->syncp); 2296 2297 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) 2298 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset); 2299 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); 2300 } 2301 2302 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 2303 u32 *rule_locs) 2304 { 2305 int ret = -EOPNOTSUPP; 2306 2307 switch (cmd->cmd) { 2308 case ETHTOOL_GRXRINGS: 2309 if (dev->features & NETIF_F_LRO) { 2310 cmd->data = MTK_MAX_RX_RING_NUM; 2311 ret = 0; 2312 } 2313 break; 2314 case ETHTOOL_GRXCLSRLCNT: 2315 if (dev->features & NETIF_F_LRO) { 2316 struct mtk_mac *mac = netdev_priv(dev); 2317 2318 cmd->rule_cnt = mac->hwlro_ip_cnt; 2319 ret = 0; 2320 } 2321 break; 2322 case ETHTOOL_GRXCLSRULE: 2323 if (dev->features & NETIF_F_LRO) 2324 ret = mtk_hwlro_get_fdir_entry(dev, cmd); 2325 break; 2326 case ETHTOOL_GRXCLSRLALL: 2327 if (dev->features & NETIF_F_LRO) 2328 ret = mtk_hwlro_get_fdir_all(dev, cmd, 2329 rule_locs); 2330 break; 2331 default: 2332 break; 2333 } 2334 2335 return ret; 2336 } 2337 2338 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 2339 { 2340 int ret = -EOPNOTSUPP; 2341 2342 switch (cmd->cmd) { 2343 case ETHTOOL_SRXCLSRLINS: 2344 if (dev->features & NETIF_F_LRO) 2345 ret = mtk_hwlro_add_ipaddr(dev, cmd); 2346 break; 2347 case ETHTOOL_SRXCLSRLDEL: 2348 if (dev->features & NETIF_F_LRO) 2349 ret = mtk_hwlro_del_ipaddr(dev, cmd); 2350 break; 2351 default: 2352 break; 2353 } 2354 2355 return ret; 2356 } 2357 2358 static const struct ethtool_ops mtk_ethtool_ops = { 2359 .get_link_ksettings = mtk_get_link_ksettings, 2360 .set_link_ksettings = mtk_set_link_ksettings, 2361 .get_drvinfo = mtk_get_drvinfo, 2362 .get_msglevel = mtk_get_msglevel, 2363 .set_msglevel = mtk_set_msglevel, 2364 .nway_reset = mtk_nway_reset, 2365 .get_link = mtk_get_link, 2366 .get_strings = mtk_get_strings, 2367 .get_sset_count = mtk_get_sset_count, 2368 .get_ethtool_stats = mtk_get_ethtool_stats, 2369 .get_rxnfc = mtk_get_rxnfc, 2370 .set_rxnfc = mtk_set_rxnfc, 2371 }; 2372 2373 static const struct net_device_ops mtk_netdev_ops = { 2374 .ndo_init = mtk_init, 2375 .ndo_uninit = mtk_uninit, 2376 .ndo_open = mtk_open, 2377 .ndo_stop = mtk_stop, 2378 .ndo_start_xmit = mtk_start_xmit, 2379 .ndo_set_mac_address = mtk_set_mac_address, 2380 .ndo_validate_addr = eth_validate_addr, 2381 .ndo_do_ioctl = mtk_do_ioctl, 2382 .ndo_tx_timeout = mtk_tx_timeout, 2383 .ndo_get_stats64 = mtk_get_stats64, 2384 .ndo_fix_features = mtk_fix_features, 2385 .ndo_set_features = mtk_set_features, 2386 #ifdef CONFIG_NET_POLL_CONTROLLER 2387 .ndo_poll_controller = mtk_poll_controller, 2388 #endif 2389 }; 2390 2391 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) 2392 { 2393 struct mtk_mac *mac; 2394 const __be32 *_id = of_get_property(np, "reg", NULL); 2395 int id, err; 2396 2397 if (!_id) { 2398 dev_err(eth->dev, "missing mac id\n"); 2399 return -EINVAL; 2400 } 2401 2402 id = be32_to_cpup(_id); 2403 if (id >= MTK_MAC_COUNT) { 2404 dev_err(eth->dev, "%d is not a valid mac id\n", id); 2405 return -EINVAL; 2406 } 2407 2408 if (eth->netdev[id]) { 2409 dev_err(eth->dev, "duplicate mac id found: %d\n", id); 2410 return -EINVAL; 2411 } 2412 2413 eth->netdev[id] = alloc_etherdev(sizeof(*mac)); 2414 if (!eth->netdev[id]) { 2415 dev_err(eth->dev, "alloc_etherdev failed\n"); 2416 return -ENOMEM; 2417 } 2418 mac = netdev_priv(eth->netdev[id]); 2419 eth->mac[id] = mac; 2420 mac->id = id; 2421 mac->hw = eth; 2422 mac->of_node = np; 2423 2424 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip)); 2425 mac->hwlro_ip_cnt = 0; 2426 2427 mac->hw_stats = devm_kzalloc(eth->dev, 2428 sizeof(*mac->hw_stats), 2429 GFP_KERNEL); 2430 if (!mac->hw_stats) { 2431 dev_err(eth->dev, "failed to allocate counter memory\n"); 2432 err = -ENOMEM; 2433 goto free_netdev; 2434 } 2435 spin_lock_init(&mac->hw_stats->stats_lock); 2436 u64_stats_init(&mac->hw_stats->syncp); 2437 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; 2438 2439 SET_NETDEV_DEV(eth->netdev[id], eth->dev); 2440 eth->netdev[id]->watchdog_timeo = 5 * HZ; 2441 eth->netdev[id]->netdev_ops = &mtk_netdev_ops; 2442 eth->netdev[id]->base_addr = (unsigned long)eth->base; 2443 2444 eth->netdev[id]->hw_features = MTK_HW_FEATURES; 2445 if (eth->hwlro) 2446 eth->netdev[id]->hw_features |= NETIF_F_LRO; 2447 2448 eth->netdev[id]->vlan_features = MTK_HW_FEATURES & 2449 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); 2450 eth->netdev[id]->features |= MTK_HW_FEATURES; 2451 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; 2452 2453 eth->netdev[id]->irq = eth->irq[0]; 2454 eth->netdev[id]->dev.of_node = np; 2455 2456 return 0; 2457 2458 free_netdev: 2459 free_netdev(eth->netdev[id]); 2460 return err; 2461 } 2462 2463 static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id) 2464 { 2465 u32 val[2], id[4]; 2466 2467 regmap_read(eth->ethsys, ETHSYS_CHIPID0_3, &val[0]); 2468 regmap_read(eth->ethsys, ETHSYS_CHIPID4_7, &val[1]); 2469 2470 id[3] = ((val[0] >> 16) & 0xff) - '0'; 2471 id[2] = ((val[0] >> 24) & 0xff) - '0'; 2472 id[1] = (val[1] & 0xff) - '0'; 2473 id[0] = ((val[1] >> 8) & 0xff) - '0'; 2474 2475 *chip_id = (id[3] * 1000) + (id[2] * 100) + 2476 (id[1] * 10) + id[0]; 2477 2478 if (!(*chip_id)) { 2479 dev_err(eth->dev, "failed to get chip id\n"); 2480 return -ENODEV; 2481 } 2482 2483 dev_info(eth->dev, "chip id = %d\n", *chip_id); 2484 2485 return 0; 2486 } 2487 2488 static bool mtk_is_hwlro_supported(struct mtk_eth *eth) 2489 { 2490 switch (eth->chip_id) { 2491 case MT7622_ETH: 2492 case MT7623_ETH: 2493 return true; 2494 } 2495 2496 return false; 2497 } 2498 2499 static int mtk_probe(struct platform_device *pdev) 2500 { 2501 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2502 struct device_node *mac_np; 2503 struct mtk_eth *eth; 2504 int err; 2505 int i; 2506 2507 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); 2508 if (!eth) 2509 return -ENOMEM; 2510 2511 eth->soc = of_device_get_match_data(&pdev->dev); 2512 2513 eth->dev = &pdev->dev; 2514 eth->base = devm_ioremap_resource(&pdev->dev, res); 2515 if (IS_ERR(eth->base)) 2516 return PTR_ERR(eth->base); 2517 2518 spin_lock_init(ð->page_lock); 2519 spin_lock_init(ð->tx_irq_lock); 2520 spin_lock_init(ð->rx_irq_lock); 2521 2522 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, 2523 "mediatek,ethsys"); 2524 if (IS_ERR(eth->ethsys)) { 2525 dev_err(&pdev->dev, "no ethsys regmap found\n"); 2526 return PTR_ERR(eth->ethsys); 2527 } 2528 2529 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { 2530 eth->sgmiisys = 2531 syscon_regmap_lookup_by_phandle(pdev->dev.of_node, 2532 "mediatek,sgmiisys"); 2533 if (IS_ERR(eth->sgmiisys)) { 2534 dev_err(&pdev->dev, "no sgmiisys regmap found\n"); 2535 return PTR_ERR(eth->sgmiisys); 2536 } 2537 } 2538 2539 if (eth->soc->required_pctl) { 2540 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, 2541 "mediatek,pctl"); 2542 if (IS_ERR(eth->pctl)) { 2543 dev_err(&pdev->dev, "no pctl regmap found\n"); 2544 return PTR_ERR(eth->pctl); 2545 } 2546 } 2547 2548 for (i = 0; i < 3; i++) { 2549 eth->irq[i] = platform_get_irq(pdev, i); 2550 if (eth->irq[i] < 0) { 2551 dev_err(&pdev->dev, "no IRQ%d resource found\n", i); 2552 return -ENXIO; 2553 } 2554 } 2555 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { 2556 eth->clks[i] = devm_clk_get(eth->dev, 2557 mtk_clks_source_name[i]); 2558 if (IS_ERR(eth->clks[i])) { 2559 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) 2560 return -EPROBE_DEFER; 2561 if (eth->soc->required_clks & BIT(i)) { 2562 dev_err(&pdev->dev, "clock %s not found\n", 2563 mtk_clks_source_name[i]); 2564 return -EINVAL; 2565 } 2566 eth->clks[i] = NULL; 2567 } 2568 } 2569 2570 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); 2571 INIT_WORK(ð->pending_work, mtk_pending_work); 2572 2573 err = mtk_hw_init(eth); 2574 if (err) 2575 return err; 2576 2577 err = mtk_get_chip_id(eth, ð->chip_id); 2578 if (err) 2579 return err; 2580 2581 eth->hwlro = mtk_is_hwlro_supported(eth); 2582 2583 for_each_child_of_node(pdev->dev.of_node, mac_np) { 2584 if (!of_device_is_compatible(mac_np, 2585 "mediatek,eth-mac")) 2586 continue; 2587 2588 if (!of_device_is_available(mac_np)) 2589 continue; 2590 2591 err = mtk_add_mac(eth, mac_np); 2592 if (err) 2593 goto err_deinit_hw; 2594 } 2595 2596 err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0, 2597 dev_name(eth->dev), eth); 2598 if (err) 2599 goto err_free_dev; 2600 2601 err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0, 2602 dev_name(eth->dev), eth); 2603 if (err) 2604 goto err_free_dev; 2605 2606 err = mtk_mdio_init(eth); 2607 if (err) 2608 goto err_free_dev; 2609 2610 for (i = 0; i < MTK_MAX_DEVS; i++) { 2611 if (!eth->netdev[i]) 2612 continue; 2613 2614 err = register_netdev(eth->netdev[i]); 2615 if (err) { 2616 dev_err(eth->dev, "error bringing up device\n"); 2617 goto err_deinit_mdio; 2618 } else 2619 netif_info(eth, probe, eth->netdev[i], 2620 "mediatek frame engine at 0x%08lx, irq %d\n", 2621 eth->netdev[i]->base_addr, eth->irq[0]); 2622 } 2623 2624 /* we run 2 devices on the same DMA ring so we need a dummy device 2625 * for NAPI to work 2626 */ 2627 init_dummy_netdev(ð->dummy_dev); 2628 netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx, 2629 MTK_NAPI_WEIGHT); 2630 netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx, 2631 MTK_NAPI_WEIGHT); 2632 2633 platform_set_drvdata(pdev, eth); 2634 2635 return 0; 2636 2637 err_deinit_mdio: 2638 mtk_mdio_cleanup(eth); 2639 err_free_dev: 2640 mtk_free_dev(eth); 2641 err_deinit_hw: 2642 mtk_hw_deinit(eth); 2643 2644 return err; 2645 } 2646 2647 static int mtk_remove(struct platform_device *pdev) 2648 { 2649 struct mtk_eth *eth = platform_get_drvdata(pdev); 2650 int i; 2651 2652 /* stop all devices to make sure that dma is properly shut down */ 2653 for (i = 0; i < MTK_MAC_COUNT; i++) { 2654 if (!eth->netdev[i]) 2655 continue; 2656 mtk_stop(eth->netdev[i]); 2657 } 2658 2659 mtk_hw_deinit(eth); 2660 2661 netif_napi_del(ð->tx_napi); 2662 netif_napi_del(ð->rx_napi); 2663 mtk_cleanup(eth); 2664 mtk_mdio_cleanup(eth); 2665 2666 return 0; 2667 } 2668 2669 static const struct mtk_soc_data mt2701_data = { 2670 .caps = MTK_GMAC1_TRGMII, 2671 .required_clks = MT7623_CLKS_BITMAP, 2672 .required_pctl = true, 2673 }; 2674 2675 static const struct mtk_soc_data mt7622_data = { 2676 .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW, 2677 .required_clks = MT7622_CLKS_BITMAP, 2678 .required_pctl = false, 2679 }; 2680 2681 static const struct mtk_soc_data mt7623_data = { 2682 .caps = MTK_GMAC1_TRGMII, 2683 .required_clks = MT7623_CLKS_BITMAP, 2684 .required_pctl = true, 2685 }; 2686 2687 const struct of_device_id of_mtk_match[] = { 2688 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data}, 2689 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data}, 2690 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data}, 2691 {}, 2692 }; 2693 MODULE_DEVICE_TABLE(of, of_mtk_match); 2694 2695 static struct platform_driver mtk_driver = { 2696 .probe = mtk_probe, 2697 .remove = mtk_remove, 2698 .driver = { 2699 .name = "mtk_soc_eth", 2700 .of_match_table = of_mtk_match, 2701 }, 2702 }; 2703 2704 module_platform_driver(mtk_driver); 2705 2706 MODULE_LICENSE("GPL"); 2707 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); 2708 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC"); 2709