Lines Matching +full:mt7622 +full:- +full:eth
1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
24 #include <linux/pcs/pcs-mtk-lynxi.h>
34 static int mtk_msg_level = -1;
36 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
282 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) in mtk_w32() argument
284 __raw_writel(val, eth->base + reg); in mtk_w32()
287 u32 mtk_r32(struct mtk_eth *eth, unsigned reg) in mtk_r32() argument
289 return __raw_readl(eth->base + reg); in mtk_r32()
292 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg) in mtk_m32() argument
296 val = mtk_r32(eth, reg); in mtk_m32()
299 mtk_w32(eth, val, reg); in mtk_m32()
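/* Editorial sketch (not matched source): mtk_m32() composes the two
 * accessors above into a read-modify-write: read the register, clear
 * @mask, OR in @set, write back. The return-value choice here is an
 * assumption, not the elided driver code.
 */
static u32 example_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
{
	u32 val = mtk_r32(eth, reg);

	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);

	return val;
}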
303 static int mtk_mdio_busy_wait(struct mtk_eth *eth) in mtk_mdio_busy_wait() argument
308 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS)) in mtk_mdio_busy_wait()
315 dev_err(eth->dev, "mdio: MDIO timeout\n"); in mtk_mdio_busy_wait()
316 return -ETIMEDOUT; in mtk_mdio_busy_wait()
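/* Hedged sketch of the elided polling loop: spin on PHY_IAC until the
 * ACCESS bit clears, give up after a deadline. The 100 ms timeout and
 * the cond_resched() call are assumptions, not the matched code.
 */
static int example_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(100);

	do {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		cond_resched();
	} while (time_before(jiffies, deadline));

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -ETIMEDOUT;
}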
319 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg, in _mtk_mdio_write_c22() argument
324 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c22()
328 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c22()
336 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c22()
343 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr, in _mtk_mdio_write_c45() argument
348 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
352 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c45()
360 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
364 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c45()
372 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
379 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg) in _mtk_mdio_read_c22() argument
383 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c22()
387 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c22()
394 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c22()
398 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK; in _mtk_mdio_read_c22()
401 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr, in _mtk_mdio_read_c45() argument
406 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
410 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c45()
418 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
422 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c45()
429 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
433 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK; in _mtk_mdio_read_c45()
439 struct mtk_eth *eth = bus->priv; in mtk_mdio_write_c22() local
441 return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val); in mtk_mdio_write_c22()
447 struct mtk_eth *eth = bus->priv; in mtk_mdio_write_c45() local
449 return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val); in mtk_mdio_write_c45()
454 struct mtk_eth *eth = bus->priv; in mtk_mdio_read_c22() local
456 return _mtk_mdio_read_c22(eth, phy_addr, phy_reg); in mtk_mdio_read_c22()
462 struct mtk_eth *eth = bus->priv; in mtk_mdio_read_c45() local
464 return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg); in mtk_mdio_read_c45()
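/* Usage sketch: once installed on the mii_bus (see mtk_mdio_init()
 * below), the four wrappers above are reached through the generic MDIO
 * helpers. The PHY address and MMD/register values here are
 * illustrative only.
 */
static int example_c45_read(struct mii_bus *bus)
{
	return mdiobus_c45_read(bus, 0, MDIO_MMD_VEND1, 0);
}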
467 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth, in mt7621_gmac0_rgmii_adjust() argument
475 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, in mt7621_gmac0_rgmii_adjust()
481 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, in mtk_gmac0_rgmii_adjust() argument
487 mtk_w32(eth, TRGMII_MODE, INTF_MODE); in mtk_gmac0_rgmii_adjust()
488 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000); in mtk_gmac0_rgmii_adjust()
490 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); in mtk_gmac0_rgmii_adjust()
494 dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n"); in mtk_gmac0_rgmii_adjust()
497 static void mtk_setup_bridge_switch(struct mtk_eth *eth) in mtk_setup_bridge_switch() argument
500 mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID), in mtk_setup_bridge_switch()
504 mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK, in mtk_setup_bridge_switch()
515 struct mtk_eth *eth = mac->hw; in mtk_mac_select_pcs() local
520 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? in mtk_mac_select_pcs()
521 0 : mac->id; in mtk_mac_select_pcs()
523 return eth->sgmii_pcs[sid]; in mtk_mac_select_pcs()
534 struct mtk_eth *eth = mac->hw; in mtk_mac_config() local
539 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_mac_config()
540 mac->interface != state->interface) { in mtk_mac_config()
542 switch (state->interface) { in mtk_mac_config()
549 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) { in mtk_mac_config()
550 err = mtk_gmac_rgmii_path_setup(eth, mac->id); in mtk_mac_config()
558 err = mtk_gmac_sgmii_path_setup(eth, mac->id); in mtk_mac_config()
563 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) { in mtk_mac_config()
564 err = mtk_gmac_gephy_path_setup(eth, mac->id); in mtk_mac_config()
576 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII && in mtk_mac_config()
577 !phy_interface_mode_is_8023z(state->interface) && in mtk_mac_config()
578 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) { in mtk_mac_config()
579 if (MTK_HAS_CAPS(mac->hw->soc->caps, in mtk_mac_config()
581 if (mt7621_gmac0_rgmii_adjust(mac->hw, in mtk_mac_config()
582 state->interface)) in mtk_mac_config()
585 mtk_gmac0_rgmii_adjust(mac->hw, in mtk_mac_config()
586 state->interface); in mtk_mac_config()
590 mtk_w32(mac->hw, in mtk_mac_config()
595 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL, in mtk_mac_config()
597 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL); in mtk_mac_config()
601 switch (state->interface) { in mtk_mac_config()
612 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
613 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id); in mtk_mac_config()
614 val |= SYSCFG0_GE_MODE(ge_mode, mac->id); in mtk_mac_config()
615 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); in mtk_mac_config()
617 mac->interface = state->interface; in mtk_mac_config()
621 if (state->interface == PHY_INTERFACE_MODE_SGMII || in mtk_mac_config()
622 phy_interface_mode_is_8023z(state->interface)) { in mtk_mac_config()
626 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
628 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_config()
633 mac->syscfg0 = val; in mtk_mac_config()
635 dev_err(eth->dev, in mtk_mac_config()
636 "In-band mode not supported in non SGMII mode!\n"); in mtk_mac_config()
641 if (mtk_is_netsys_v3_or_greater(eth) && in mtk_mac_config()
642 mac->interface == PHY_INTERFACE_MODE_INTERNAL) { in mtk_mac_config()
643 mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id)); in mtk_mac_config()
644 mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id)); in mtk_mac_config()
646 mtk_setup_bridge_switch(eth); in mtk_mac_config()
652 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__, in mtk_mac_config()
653 mac->id, phy_modes(state->interface)); in mtk_mac_config()
657 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__, in mtk_mac_config()
658 mac->id, phy_modes(state->interface), err); in mtk_mac_config()
666 struct mtk_eth *eth = mac->hw; in mtk_mac_finish() local
672 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_finish()
673 SYSCFG0_SGMII_MASK, mac->syscfg0); in mtk_mac_finish()
676 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); in mtk_mac_finish()
683 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); in mtk_mac_finish()
693 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); in mtk_mac_link_down()
696 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); in mtk_mac_link_down()
699 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx, in mtk_set_queue_speed() argument
702 const struct mtk_soc_data *soc = eth->soc; in mtk_set_queue_speed()
705 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) in mtk_set_queue_speed()
713 if (mtk_is_netsys_v1(eth)) in mtk_set_queue_speed()
765 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); in mtk_set_queue_speed()
777 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); in mtk_mac_link_up()
783 mac->speed = speed; in mtk_mac_link_up()
798 /* Configure pause modes - phylink will avoid these for half duplex */ in mtk_mac_link_up()
805 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); in mtk_mac_link_up()
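/* Sketch: the callbacks above slot into phylink as a phylink_mac_ops
 * table, roughly as below. This is an illustration of the wiring, not a
 * verbatim copy of the driver's own ops table.
 */
static const struct phylink_mac_ops example_phylink_ops = {
	.mac_select_pcs = mtk_mac_select_pcs,
	.mac_config = mtk_mac_config,
	.mac_finish = mtk_mac_finish,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};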
816 static int mtk_mdio_init(struct mtk_eth *eth) in mtk_mdio_init() argument
823 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); in mtk_mdio_init()
825 dev_err(eth->dev, "no %s child node found", "mdio-bus"); in mtk_mdio_init()
826 return -ENODEV; in mtk_mdio_init()
830 ret = -ENODEV; in mtk_mdio_init()
834 eth->mii_bus = devm_mdiobus_alloc(eth->dev); in mtk_mdio_init()
835 if (!eth->mii_bus) { in mtk_mdio_init()
836 ret = -ENOMEM; in mtk_mdio_init()
840 eth->mii_bus->name = "mdio"; in mtk_mdio_init()
841 eth->mii_bus->read = mtk_mdio_read_c22; in mtk_mdio_init()
842 eth->mii_bus->write = mtk_mdio_write_c22; in mtk_mdio_init()
843 eth->mii_bus->read_c45 = mtk_mdio_read_c45; in mtk_mdio_init()
844 eth->mii_bus->write_c45 = mtk_mdio_write_c45; in mtk_mdio_init()
845 eth->mii_bus->priv = eth; in mtk_mdio_init()
846 eth->mii_bus->parent = eth->dev; in mtk_mdio_init()
848 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); in mtk_mdio_init()
850 if (!of_property_read_u32(mii_np, "clock-frequency", &val)) { in mtk_mdio_init()
852 dev_err(eth->dev, "MDIO clock frequency out of range"); in mtk_mdio_init()
853 ret = -EINVAL; in mtk_mdio_init()
861 if (mtk_is_netsys_v3_or_greater(eth)) in mtk_mdio_init()
862 mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3); in mtk_mdio_init()
866 if (!mtk_is_netsys_v3_or_greater(eth)) in mtk_mdio_init()
868 mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC); in mtk_mdio_init()
870 dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider); in mtk_mdio_init()
872 ret = of_mdiobus_register(eth->mii_bus, mii_np); in mtk_mdio_init()
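/* Hedged sketch of the divider math elided between lines 853 and 870:
 * the requested "clock-frequency" is clamped against MDC_MAX_FREQ and
 * turned into a bus divider. The cap of 63 is an assumption.
 */
static u32 example_mdc_divider(u32 max_clk)
{
	return min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
}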
879 static void mtk_mdio_cleanup(struct mtk_eth *eth) in mtk_mdio_cleanup() argument
881 if (!eth->mii_bus) in mtk_mdio_cleanup()
884 mdiobus_unregister(eth->mii_bus); in mtk_mdio_cleanup()
887 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask) in mtk_tx_irq_disable() argument
892 spin_lock_irqsave(&eth->tx_irq_lock, flags); in mtk_tx_irq_disable()
893 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_disable()
894 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_disable()
895 spin_unlock_irqrestore(&eth->tx_irq_lock, flags); in mtk_tx_irq_disable()
898 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask) in mtk_tx_irq_enable() argument
903 spin_lock_irqsave(&eth->tx_irq_lock, flags); in mtk_tx_irq_enable()
904 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_enable()
905 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_enable()
906 spin_unlock_irqrestore(&eth->tx_irq_lock, flags); in mtk_tx_irq_enable()
909 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask) in mtk_rx_irq_disable() argument
914 spin_lock_irqsave(&eth->rx_irq_lock, flags); in mtk_rx_irq_disable()
915 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_disable()
916 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_disable()
917 spin_unlock_irqrestore(&eth->rx_irq_lock, flags); in mtk_rx_irq_disable()
920 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask) in mtk_rx_irq_enable() argument
925 spin_lock_irqsave(&eth->rx_irq_lock, flags); in mtk_rx_irq_enable()
926 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_enable()
927 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_enable()
928 spin_unlock_irqrestore(&eth->rx_irq_lock, flags); in mtk_rx_irq_enable()
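/* Usage sketch: the helpers above implement the usual NAPI interrupt
 * dance. A hard IRQ handler masks the source and schedules the poller,
 * which re-enables the mask when done (see mtk_napi_tx() further down).
 * This handler is illustrative, not the driver's real one.
 */
static irqreturn_t example_handle_tx_irq(int irq, void *priv)
{
	struct mtk_eth *eth = priv;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	napi_schedule(&eth->tx_napi);

	return IRQ_HANDLED;
}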
935 struct mtk_eth *eth = mac->hw; in mtk_set_mac_address() local
936 const char *macaddr = dev->dev_addr; in mtk_set_mac_address()
941 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) in mtk_set_mac_address()
942 return -EBUSY; in mtk_set_mac_address()
944 spin_lock_bh(&mac->hw->page_lock); in mtk_set_mac_address()
945 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_set_mac_address()
946 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], in mtk_set_mac_address()
948 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | in mtk_set_mac_address()
952 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], in mtk_set_mac_address()
953 MTK_GDMA_MAC_ADRH(mac->id)); in mtk_set_mac_address()
954 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | in mtk_set_mac_address()
956 MTK_GDMA_MAC_ADRL(mac->id)); in mtk_set_mac_address()
958 spin_unlock_bh(&mac->hw->page_lock); in mtk_set_mac_address()
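/* Sketch of the byte packing implied by the writes above: ADRH carries
 * the two high MAC bytes, ADRL the remaining four. The placement of
 * m[4]/m[5] is inferred; those lines are elided from the match.
 */
static void example_pack_mac(const u8 *m, u32 *adrh, u32 *adrl)
{
	*adrh = (m[0] << 8) | m[1];
	*adrl = (m[2] << 24) | (m[3] << 16) | (m[4] << 8) | m[5];
}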
965 struct mtk_hw_stats *hw_stats = mac->hw_stats; in mtk_stats_update_mac()
966 struct mtk_eth *eth = mac->hw; in mtk_stats_update_mac() local
968 u64_stats_update_begin(&hw_stats->syncp); in mtk_stats_update_mac()
970 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_stats_update_mac()
971 hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT); in mtk_stats_update_mac()
972 hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT); in mtk_stats_update_mac()
973 hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT); in mtk_stats_update_mac()
974 hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT); in mtk_stats_update_mac()
975 hw_stats->rx_checksum_errors += in mtk_stats_update_mac()
976 mtk_r32(mac->hw, MT7628_SDM_CS_ERR); in mtk_stats_update_mac()
978 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_stats_update_mac()
979 unsigned int offs = hw_stats->reg_offset; in mtk_stats_update_mac()
982 hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs); in mtk_stats_update_mac()
983 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs); in mtk_stats_update_mac()
985 hw_stats->rx_bytes += (stats << 32); in mtk_stats_update_mac()
986 hw_stats->rx_packets += in mtk_stats_update_mac()
987 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs); in mtk_stats_update_mac()
988 hw_stats->rx_overflow += in mtk_stats_update_mac()
989 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs); in mtk_stats_update_mac()
990 hw_stats->rx_fcs_errors += in mtk_stats_update_mac()
991 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs); in mtk_stats_update_mac()
992 hw_stats->rx_short_errors += in mtk_stats_update_mac()
993 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs); in mtk_stats_update_mac()
994 hw_stats->rx_long_errors += in mtk_stats_update_mac()
995 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs); in mtk_stats_update_mac()
996 hw_stats->rx_checksum_errors += in mtk_stats_update_mac()
997 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs); in mtk_stats_update_mac()
998 hw_stats->rx_flow_control_packets += in mtk_stats_update_mac()
999 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs); in mtk_stats_update_mac()
1001 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_stats_update_mac()
1002 hw_stats->tx_skip += in mtk_stats_update_mac()
1003 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs); in mtk_stats_update_mac()
1004 hw_stats->tx_collisions += in mtk_stats_update_mac()
1005 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs); in mtk_stats_update_mac()
1006 hw_stats->tx_bytes += in mtk_stats_update_mac()
1007 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs); in mtk_stats_update_mac()
1008 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs); in mtk_stats_update_mac()
1010 hw_stats->tx_bytes += (stats << 32); in mtk_stats_update_mac()
1011 hw_stats->tx_packets += in mtk_stats_update_mac()
1012 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs); in mtk_stats_update_mac()
1014 hw_stats->tx_skip += in mtk_stats_update_mac()
1015 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs); in mtk_stats_update_mac()
1016 hw_stats->tx_collisions += in mtk_stats_update_mac()
1017 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs); in mtk_stats_update_mac()
1018 hw_stats->tx_bytes += in mtk_stats_update_mac()
1019 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs); in mtk_stats_update_mac()
1020 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs); in mtk_stats_update_mac()
1022 hw_stats->tx_bytes += (stats << 32); in mtk_stats_update_mac()
1023 hw_stats->tx_packets += in mtk_stats_update_mac()
1024 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs); in mtk_stats_update_mac()
1028 u64_stats_update_end(&hw_stats->syncp); in mtk_stats_update_mac()
1031 static void mtk_stats_update(struct mtk_eth *eth) in mtk_stats_update() argument
1036 if (!eth->mac[i] || !eth->mac[i]->hw_stats) in mtk_stats_update()
1038 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) { in mtk_stats_update()
1039 mtk_stats_update_mac(eth->mac[i]); in mtk_stats_update()
1040 spin_unlock(&eth->mac[i]->hw_stats->stats_lock); in mtk_stats_update()
1049 struct mtk_hw_stats *hw_stats = mac->hw_stats; in mtk_get_stats64()
1053 if (spin_trylock_bh(&hw_stats->stats_lock)) { in mtk_get_stats64()
1055 spin_unlock_bh(&hw_stats->stats_lock); in mtk_get_stats64()
1060 start = u64_stats_fetch_begin(&hw_stats->syncp); in mtk_get_stats64()
1061 storage->rx_packets = hw_stats->rx_packets; in mtk_get_stats64()
1062 storage->tx_packets = hw_stats->tx_packets; in mtk_get_stats64()
1063 storage->rx_bytes = hw_stats->rx_bytes; in mtk_get_stats64()
1064 storage->tx_bytes = hw_stats->tx_bytes; in mtk_get_stats64()
1065 storage->collisions = hw_stats->tx_collisions; in mtk_get_stats64()
1066 storage->rx_length_errors = hw_stats->rx_short_errors + in mtk_get_stats64()
1067 hw_stats->rx_long_errors; in mtk_get_stats64()
1068 storage->rx_over_errors = hw_stats->rx_overflow; in mtk_get_stats64()
1069 storage->rx_crc_errors = hw_stats->rx_fcs_errors; in mtk_get_stats64()
1070 storage->rx_errors = hw_stats->rx_checksum_errors; in mtk_get_stats64()
1071 storage->tx_aborted_errors = hw_stats->tx_skip; in mtk_get_stats64()
1072 } while (u64_stats_fetch_retry(&hw_stats->syncp, start)); in mtk_get_stats64()
1074 storage->tx_errors = dev->stats.tx_errors; in mtk_get_stats64()
1075 storage->rx_dropped = dev->stats.rx_dropped; in mtk_get_stats64()
1076 storage->tx_dropped = dev->stats.tx_dropped; in mtk_get_stats64()
1083 mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; in mtk_max_frag_size()
1091 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN - in mtk_max_buf_size()
1099 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd, in mtk_rx_get_desc() argument
1102 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2); in mtk_rx_get_desc()
1103 if (!(rxd->rxd2 & RX_DMA_DONE)) in mtk_rx_get_desc()
1106 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); in mtk_rx_get_desc()
1107 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); in mtk_rx_get_desc()
1108 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); in mtk_rx_get_desc()
1109 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_rx_get_desc()
1110 rxd->rxd5 = READ_ONCE(dma_rxd->rxd5); in mtk_rx_get_desc()
1111 rxd->rxd6 = READ_ONCE(dma_rxd->rxd6); in mtk_rx_get_desc()
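/* Usage sketch: callers take a private snapshot and only trust it once
 * RX_DMA_DONE has been observed in rxd2; the READ_ONCE() pairs above
 * keep the compiler from re-reading live descriptor memory.
 */
static bool example_rx_ready(struct mtk_eth *eth, struct mtk_rx_dma_v2 *dma_rxd)
{
	struct mtk_rx_dma_v2 trxd;

	return mtk_rx_get_desc(eth, &trxd, dma_rxd);
}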
1129 static int mtk_init_fq_dma(struct mtk_eth *eth) in mtk_init_fq_dma() argument
1131 const struct mtk_soc_data *soc = eth->soc; in mtk_init_fq_dma()
1137 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) in mtk_init_fq_dma()
1138 eth->scratch_ring = eth->sram_base; in mtk_init_fq_dma()
1140 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, in mtk_init_fq_dma()
1141 cnt * soc->txrx.txd_size, in mtk_init_fq_dma()
1142 &eth->phy_scratch_ring, in mtk_init_fq_dma()
1144 if (unlikely(!eth->scratch_ring)) in mtk_init_fq_dma()
1145 return -ENOMEM; in mtk_init_fq_dma()
1147 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); in mtk_init_fq_dma()
1148 if (unlikely(!eth->scratch_head)) in mtk_init_fq_dma()
1149 return -ENOMEM; in mtk_init_fq_dma()
1151 dma_addr = dma_map_single(eth->dma_dev, in mtk_init_fq_dma()
1152 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, in mtk_init_fq_dma()
1154 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) in mtk_init_fq_dma()
1155 return -ENOMEM; in mtk_init_fq_dma()
1157 phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1); in mtk_init_fq_dma()
1162 txd = eth->scratch_ring + i * soc->txrx.txd_size; in mtk_init_fq_dma()
1163 txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE; in mtk_init_fq_dma()
1164 if (i < cnt - 1) in mtk_init_fq_dma()
1165 txd->txd2 = eth->phy_scratch_ring + in mtk_init_fq_dma()
1166 (i + 1) * soc->txrx.txd_size; in mtk_init_fq_dma()
1168 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); in mtk_init_fq_dma()
1169 txd->txd4 = 0; in mtk_init_fq_dma()
1170 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_init_fq_dma()
1171 txd->txd5 = 0; in mtk_init_fq_dma()
1172 txd->txd6 = 0; in mtk_init_fq_dma()
1173 txd->txd7 = 0; in mtk_init_fq_dma()
1174 txd->txd8 = 0; in mtk_init_fq_dma()
1178 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head); in mtk_init_fq_dma()
1179 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail); in mtk_init_fq_dma()
1180 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count); in mtk_init_fq_dma()
1181 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen); in mtk_init_fq_dma()
1188 return ring->dma + (desc - ring->phys); in mtk_qdma_phys_to_virt()
1194 int idx = (txd - ring->dma) / txd_size; in mtk_desc_to_tx_buf()
1196 return &ring->buf[idx]; in mtk_desc_to_tx_buf()
1202 return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma; in qdma_to_pdma()
1207 return (dma - ring->dma) / txd_size; in txd_to_idx()
1210 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, in mtk_tx_unmap() argument
1213 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_tx_unmap()
1214 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { in mtk_tx_unmap()
1215 dma_unmap_single(eth->dma_dev, in mtk_tx_unmap()
1219 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { in mtk_tx_unmap()
1220 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1227 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1234 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1241 if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { in mtk_tx_unmap()
1242 if (tx_buf->type == MTK_TYPE_SKB) { in mtk_tx_unmap()
1243 struct sk_buff *skb = tx_buf->data; in mtk_tx_unmap()
1250 struct xdp_frame *xdpf = tx_buf->data; in mtk_tx_unmap()
1252 if (napi && tx_buf->type == MTK_TYPE_XDP_TX) in mtk_tx_unmap()
1260 tx_buf->flags = 0; in mtk_tx_unmap()
1261 tx_buf->data = NULL; in mtk_tx_unmap()
1264 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, in setup_tx_buf() argument
1268 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in setup_tx_buf()
1273 txd->txd3 = mapped_addr; in setup_tx_buf()
1274 txd->txd2 |= TX_DMA_PLEN1(size); in setup_tx_buf()
1278 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; in setup_tx_buf()
1279 txd->txd1 = mapped_addr; in setup_tx_buf()
1280 txd->txd2 = TX_DMA_PLEN0(size); in setup_tx_buf()
1291 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc_v1() local
1295 WRITE_ONCE(desc->txd1, info->addr); in mtk_tx_set_dma_desc_v1()
1297 data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) | in mtk_tx_set_dma_desc_v1()
1298 FIELD_PREP(TX_DMA_PQID, info->qid); in mtk_tx_set_dma_desc_v1()
1299 if (info->last) in mtk_tx_set_dma_desc_v1()
1301 WRITE_ONCE(desc->txd3, data); in mtk_tx_set_dma_desc_v1()
1303 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */ in mtk_tx_set_dma_desc_v1()
1304 if (info->first) { in mtk_tx_set_dma_desc_v1()
1305 if (info->gso) in mtk_tx_set_dma_desc_v1()
1308 if (info->csum) in mtk_tx_set_dma_desc_v1()
1311 if (info->vlan) in mtk_tx_set_dma_desc_v1()
1312 data |= TX_DMA_INS_VLAN | info->vlan_tci; in mtk_tx_set_dma_desc_v1()
1314 WRITE_ONCE(desc->txd4, data); in mtk_tx_set_dma_desc_v1()
1322 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc_v2() local
1325 WRITE_ONCE(desc->txd1, info->addr); in mtk_tx_set_dma_desc_v2()
1327 data = TX_DMA_PLEN0(info->size); in mtk_tx_set_dma_desc_v2()
1328 if (info->last) in mtk_tx_set_dma_desc_v2()
1331 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_tx_set_dma_desc_v2()
1332 data |= TX_DMA_PREP_ADDR64(info->addr); in mtk_tx_set_dma_desc_v2()
1334 WRITE_ONCE(desc->txd3, data); in mtk_tx_set_dma_desc_v2()
1337 switch (mac->id) { in mtk_tx_set_dma_desc_v2()
1349 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid); in mtk_tx_set_dma_desc_v2()
1350 WRITE_ONCE(desc->txd4, data); in mtk_tx_set_dma_desc_v2()
1353 if (info->first) { in mtk_tx_set_dma_desc_v2()
1354 if (info->gso) in mtk_tx_set_dma_desc_v2()
1357 if (info->csum) in mtk_tx_set_dma_desc_v2()
1359 if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev)) in mtk_tx_set_dma_desc_v2()
1362 WRITE_ONCE(desc->txd5, data); in mtk_tx_set_dma_desc_v2()
1365 if (info->first && info->vlan) in mtk_tx_set_dma_desc_v2()
1366 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci; in mtk_tx_set_dma_desc_v2()
1367 WRITE_ONCE(desc->txd6, data); in mtk_tx_set_dma_desc_v2()
1369 WRITE_ONCE(desc->txd7, 0); in mtk_tx_set_dma_desc_v2()
1370 WRITE_ONCE(desc->txd8, 0); in mtk_tx_set_dma_desc_v2()
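/* Sketch: a minimal head-segment fill through the dispatcher below
 * (mtk_tx_set_dma_desc(), line 1377 onward). The field values mirror
 * mtk_tx_map()'s first-segment setup; .addr would come from
 * dma_map_single() in real use. Illustrative only.
 */
static void example_fill_head_desc(struct net_device *dev,
				   struct sk_buff *skb, void *txd)
{
	struct mtk_tx_dma_desc_info txd_info = {
		.size = skb_headlen(skb),
		.qid = skb_get_queue_mapping(skb),
		.first = true,
		.last = !skb_is_nonlinear(skb),
		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
	};

	mtk_tx_set_dma_desc(dev, txd, &txd_info);
}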
1377 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc() local
1379 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_tx_set_dma_desc()
1391 .csum = skb->ip_summed == CHECKSUM_PARTIAL, in mtk_tx_map()
1400 struct mtk_eth *eth = mac->hw; in mtk_tx_map() local
1401 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_map()
1410 itxd = ring->next_free; in mtk_tx_map()
1412 if (itxd == ring->last_free) in mtk_tx_map()
1413 return -ENOMEM; in mtk_tx_map()
1415 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size); in mtk_tx_map()
1418 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size, in mtk_tx_map()
1420 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) in mtk_tx_map()
1421 return -ENOMEM; in mtk_tx_map()
1425 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0; in mtk_tx_map()
1426 itx_buf->mac_id = mac->id; in mtk_tx_map()
1427 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size, in mtk_tx_map()
1434 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in mtk_tx_map()
1435 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in mtk_tx_map()
1442 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || in mtk_tx_map()
1444 txd = mtk_qdma_phys_to_virt(ring, txd->txd2); in mtk_tx_map()
1446 if (txd == ring->last_free) in mtk_tx_map()
1456 soc->txrx.dma_max_len); in mtk_tx_map()
1458 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 && in mtk_tx_map()
1459 !(frag_size - txd_info.size); in mtk_tx_map()
1460 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag, in mtk_tx_map()
1463 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) in mtk_tx_map()
1469 soc->txrx.txd_size); in mtk_tx_map()
1472 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; in mtk_tx_map()
1473 tx_buf->flags |= MTK_TX_FLAGS_PAGE0; in mtk_tx_map()
1474 tx_buf->mac_id = mac->id; in mtk_tx_map()
1476 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, in mtk_tx_map()
1479 frag_size -= txd_info.size; in mtk_tx_map()
1485 itx_buf->type = MTK_TYPE_SKB; in mtk_tx_map()
1486 itx_buf->data = skb; in mtk_tx_map()
1488 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_tx_map()
1490 txd_pdma->txd2 |= TX_DMA_LS0; in mtk_tx_map()
1492 txd_pdma->txd2 |= TX_DMA_LS1; in mtk_tx_map()
1495 netdev_tx_sent_queue(txq, skb->len); in mtk_tx_map()
1498 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2); in mtk_tx_map()
1499 atomic_sub(n_desc, &ring->free_count); in mtk_tx_map()
1506 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_tx_map()
1508 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); in mtk_tx_map()
1512 next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size), in mtk_tx_map()
1513 ring->dma_size); in mtk_tx_map()
1514 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0); in mtk_tx_map()
1521 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size); in mtk_tx_map()
1524 mtk_tx_unmap(eth, tx_buf, NULL, false); in mtk_tx_map()
1526 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; in mtk_tx_map()
1527 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) in mtk_tx_map()
1528 itxd_pdma->txd2 = TX_DMA_DESP2_DEF; in mtk_tx_map()
1530 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); in mtk_tx_map()
1534 return -ENOMEM; in mtk_tx_map()
1537 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb) in mtk_cal_txd_req() argument
1543 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in mtk_cal_txd_req()
1544 frag = &skb_shinfo(skb)->frags[i]; in mtk_cal_txd_req()
1546 eth->soc->txrx.dma_max_len); in mtk_cal_txd_req()
1549 nfrags += skb_shinfo(skb)->nr_frags; in mtk_cal_txd_req()
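/* Worked sketch of the sizing rule above: each fragment costs
 * DIV_ROUND_UP(frag_size, dma_max_len) descriptors on top of the linear
 * head. With an assumed dma_max_len of SZ_16K, a 64 KB fragment needs
 * four descriptors.
 */
static int example_frag_descs(unsigned int frag_size)
{
	return DIV_ROUND_UP(frag_size, SZ_16K);	/* 64K -> 4 */
}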
1555 static int mtk_queue_stopped(struct mtk_eth *eth) in mtk_queue_stopped() argument
1560 if (!eth->netdev[i]) in mtk_queue_stopped()
1562 if (netif_queue_stopped(eth->netdev[i])) in mtk_queue_stopped()
1569 static void mtk_wake_queue(struct mtk_eth *eth) in mtk_wake_queue() argument
1574 if (!eth->netdev[i]) in mtk_wake_queue()
1576 netif_tx_wake_all_queues(eth->netdev[i]); in mtk_wake_queue()
1583 struct mtk_eth *eth = mac->hw; in mtk_start_xmit() local
1584 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_start_xmit()
1585 struct net_device_stats *stats = &dev->stats; in mtk_start_xmit()
1593 spin_lock(&eth->page_lock); in mtk_start_xmit()
1595 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_start_xmit()
1598 tx_num = mtk_cal_txd_req(eth, skb); in mtk_start_xmit()
1599 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { in mtk_start_xmit()
1601 netif_err(eth, tx_queued, dev, in mtk_start_xmit()
1603 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1610 netif_warn(eth, tx_err, dev, in mtk_start_xmit()
1615 if (skb_shinfo(skb)->gso_type & in mtk_start_xmit()
1618 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size); in mtk_start_xmit()
1625 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) in mtk_start_xmit()
1628 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1633 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1634 stats->tx_dropped++; in mtk_start_xmit()
1639 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) in mtk_get_rx_ring() argument
1645 if (!eth->hwlro) in mtk_get_rx_ring()
1646 return &eth->rx_ring[0]; in mtk_get_rx_ring()
1651 ring = &eth->rx_ring[i]; in mtk_get_rx_ring()
1652 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); in mtk_get_rx_ring()
1653 rxd = ring->dma + idx * eth->soc->txrx.rxd_size; in mtk_get_rx_ring()
1654 if (rxd->rxd2 & RX_DMA_DONE) { in mtk_get_rx_ring()
1655 ring->calc_idx_update = true; in mtk_get_rx_ring()
1663 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth) in mtk_update_rx_cpu_idx() argument
1668 if (!eth->hwlro) { in mtk_update_rx_cpu_idx()
1669 ring = &eth->rx_ring[0]; in mtk_update_rx_cpu_idx()
1670 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1673 ring = &eth->rx_ring[i]; in mtk_update_rx_cpu_idx()
1674 if (ring->calc_idx_update) { in mtk_update_rx_cpu_idx()
1675 ring->calc_idx_update = false; in mtk_update_rx_cpu_idx()
1676 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1682 static bool mtk_page_pool_enabled(struct mtk_eth *eth) in mtk_page_pool_enabled() argument
1684 return mtk_is_netsys_v2_or_greater(eth); in mtk_page_pool_enabled()
1687 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth, in mtk_create_page_pool() argument
1696 .dev = eth->dma_dev, in mtk_create_page_pool()
1703 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL in mtk_create_page_pool()
1709 err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id, in mtk_create_page_pool()
1710 eth->rx_napi.napi_id, PAGE_SIZE); in mtk_create_page_pool()
1743 if (ring->page_pool) in mtk_rx_put_buff()
1744 page_pool_put_full_page(ring->page_pool, in mtk_rx_put_buff()
1750 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev, in mtk_xdp_frame_map() argument
1755 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_xdp_frame_map()
1760 txd_info->addr = dma_map_single(eth->dma_dev, data, in mtk_xdp_frame_map()
1761 txd_info->size, DMA_TO_DEVICE); in mtk_xdp_frame_map()
1762 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr))) in mtk_xdp_frame_map()
1763 return -ENOMEM; in mtk_xdp_frame_map()
1765 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; in mtk_xdp_frame_map()
1769 txd_info->addr = page_pool_get_dma_addr(page) + in mtk_xdp_frame_map()
1771 dma_sync_single_for_device(eth->dma_dev, txd_info->addr, in mtk_xdp_frame_map()
1772 txd_info->size, DMA_BIDIRECTIONAL); in mtk_xdp_frame_map()
1776 tx_buf->mac_id = mac->id; in mtk_xdp_frame_map()
1777 tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX; in mtk_xdp_frame_map()
1778 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; in mtk_xdp_frame_map()
1781 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size, in mtk_xdp_frame_map()
1787 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, in mtk_xdp_submit_frame() argument
1791 const struct mtk_soc_data *soc = eth->soc; in mtk_xdp_submit_frame()
1792 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_xdp_submit_frame()
1795 .size = xdpf->len, in mtk_xdp_submit_frame()
1798 .qid = mac->id, in mtk_xdp_submit_frame()
1803 void *data = xdpf->data; in mtk_xdp_submit_frame()
1805 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_xdp_submit_frame()
1806 return -EBUSY; in mtk_xdp_submit_frame()
1808 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; in mtk_xdp_submit_frame()
1809 if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags)) in mtk_xdp_submit_frame()
1810 return -EBUSY; in mtk_xdp_submit_frame()
1812 spin_lock(&eth->page_lock); in mtk_xdp_submit_frame()
1814 txd = ring->next_free; in mtk_xdp_submit_frame()
1815 if (txd == ring->last_free) { in mtk_xdp_submit_frame()
1816 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1817 return -ENOMEM; in mtk_xdp_submit_frame()
1821 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size); in mtk_xdp_submit_frame()
1826 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf, in mtk_xdp_submit_frame()
1827 data, xdpf->headroom, index, dma_map); in mtk_xdp_submit_frame()
1834 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) { in mtk_xdp_submit_frame()
1835 txd = mtk_qdma_phys_to_virt(ring, txd->txd2); in mtk_xdp_submit_frame()
1836 if (txd == ring->last_free) in mtk_xdp_submit_frame()
1840 soc->txrx.txd_size); in mtk_xdp_submit_frame()
1846 txd_info.size = skb_frag_size(&sinfo->frags[index]); in mtk_xdp_submit_frame()
1848 txd_info.qid = mac->id; in mtk_xdp_submit_frame()
1849 data = skb_frag_address(&sinfo->frags[index]); in mtk_xdp_submit_frame()
1854 htx_buf->data = xdpf; in mtk_xdp_submit_frame()
1856 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_xdp_submit_frame()
1860 txd_pdma->txd2 |= TX_DMA_LS0; in mtk_xdp_submit_frame()
1862 txd_pdma->txd2 |= TX_DMA_LS1; in mtk_xdp_submit_frame()
1865 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2); in mtk_xdp_submit_frame()
1866 atomic_sub(n_desc, &ring->free_count); in mtk_xdp_submit_frame()
1873 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_xdp_submit_frame()
1874 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); in mtk_xdp_submit_frame()
1878 idx = txd_to_idx(ring, txd, soc->txrx.txd_size); in mtk_xdp_submit_frame()
1879 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size), in mtk_xdp_submit_frame()
1883 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1889 tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size); in mtk_xdp_submit_frame()
1890 mtk_tx_unmap(eth, tx_buf, NULL, false); in mtk_xdp_submit_frame()
1892 htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; in mtk_xdp_submit_frame()
1893 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_xdp_submit_frame()
1896 txd_pdma->txd2 = TX_DMA_DESP2_DEF; in mtk_xdp_submit_frame()
1899 htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2); in mtk_xdp_submit_frame()
1902 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1911 struct mtk_hw_stats *hw_stats = mac->hw_stats; in mtk_xdp_xmit()
1912 struct mtk_eth *eth = mac->hw; in mtk_xdp_xmit() local
1916 return -EINVAL; in mtk_xdp_xmit()
1919 if (mtk_xdp_submit_frame(eth, frames[i], dev, true)) in mtk_xdp_xmit()
1924 u64_stats_update_begin(&hw_stats->syncp); in mtk_xdp_xmit()
1925 hw_stats->xdp_stats.tx_xdp_xmit += nxmit; in mtk_xdp_xmit()
1926 hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit; in mtk_xdp_xmit()
1927 u64_stats_update_end(&hw_stats->syncp); in mtk_xdp_xmit()
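/* Sketch: mtk_xdp_xmit() has the ndo_xdp_xmit signature, so redirect
 * targets reach it through the netdev ops table, roughly as below. The
 * driver's real net_device_ops is not part of the match.
 */
static const struct net_device_ops example_netdev_ops = {
	.ndo_xdp_xmit = mtk_xdp_xmit,
};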
1932 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring, in mtk_xdp_run() argument
1936 struct mtk_hw_stats *hw_stats = mac->hw_stats; in mtk_xdp_run()
1937 u64 *count = &hw_stats->xdp_stats.rx_xdp_drop; in mtk_xdp_run()
1943 prog = rcu_dereference(eth->prog); in mtk_xdp_run()
1950 count = &hw_stats->xdp_stats.rx_xdp_pass; in mtk_xdp_run()
1958 count = &hw_stats->xdp_stats.rx_xdp_redirect; in mtk_xdp_run()
1963 if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) { in mtk_xdp_run()
1964 count = &hw_stats->xdp_stats.rx_xdp_tx_errors; in mtk_xdp_run()
1969 count = &hw_stats->xdp_stats.rx_xdp_tx; in mtk_xdp_run()
1982 page_pool_put_full_page(ring->page_pool, in mtk_xdp_run()
1983 virt_to_head_page(xdp->data), true); in mtk_xdp_run()
1986 u64_stats_update_begin(&hw_stats->syncp); in mtk_xdp_run()
1988 u64_stats_update_end(&hw_stats->syncp); in mtk_xdp_run()
1996 struct mtk_eth *eth) in mtk_poll_rx() argument
2015 ring = mtk_get_rx_ring(eth); in mtk_poll_rx()
2019 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); in mtk_poll_rx()
2020 rxd = ring->dma + idx * eth->soc->txrx.rxd_size; in mtk_poll_rx()
2021 data = ring->data[idx]; in mtk_poll_rx()
2023 if (!mtk_rx_get_desc(eth, &trxd, rxd)) in mtk_poll_rx()
2027 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_poll_rx()
2033 mac = val - 1; in mtk_poll_rx()
2041 } else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_poll_rx()
2043 mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1; in mtk_poll_rx()
2047 !eth->netdev[mac])) in mtk_poll_rx()
2050 netdev = eth->netdev[mac]; in mtk_poll_rx()
2052 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_poll_rx()
2058 if (ring->page_pool) { in mtk_poll_rx()
2063 new_data = mtk_page_pool_get_buff(ring->page_pool, in mtk_poll_rx()
2067 netdev->stats.rx_dropped++; in mtk_poll_rx()
2071 dma_sync_single_for_cpu(eth->dma_dev, in mtk_poll_rx()
2073 pktlen, page_pool_get_dma_dir(ring->page_pool)); in mtk_poll_rx()
2075 xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q); in mtk_poll_rx()
2080 ret = mtk_xdp_run(eth, ring, &xdp, netdev); in mtk_poll_rx()
2089 page_pool_put_full_page(ring->page_pool, in mtk_poll_rx()
2091 netdev->stats.rx_dropped++; in mtk_poll_rx()
2095 skb_reserve(skb, xdp.data - xdp.data_hard_start); in mtk_poll_rx()
2096 skb_put(skb, xdp.data_end - xdp.data); in mtk_poll_rx()
2099 if (ring->frag_size <= PAGE_SIZE) in mtk_poll_rx()
2100 new_data = napi_alloc_frag(ring->frag_size); in mtk_poll_rx()
2105 netdev->stats.rx_dropped++; in mtk_poll_rx()
2109 dma_addr = dma_map_single(eth->dma_dev, in mtk_poll_rx()
2110 new_data + NET_SKB_PAD + eth->ip_align, in mtk_poll_rx()
2111 ring->buf_size, DMA_FROM_DEVICE); in mtk_poll_rx()
2112 if (unlikely(dma_mapping_error(eth->dma_dev, in mtk_poll_rx()
2115 netdev->stats.rx_dropped++; in mtk_poll_rx()
2119 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_poll_rx()
2122 dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64), in mtk_poll_rx()
2123 ring->buf_size, DMA_FROM_DEVICE); in mtk_poll_rx()
2125 skb = build_skb(data, ring->frag_size); in mtk_poll_rx()
2127 netdev->stats.rx_dropped++; in mtk_poll_rx()
2136 skb->dev = netdev; in mtk_poll_rx()
2137 bytes += skb->len; in mtk_poll_rx()
2139 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_poll_rx()
2155 if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid) in mtk_poll_rx()
2156 skb->ip_summed = CHECKSUM_UNNECESSARY; in mtk_poll_rx()
2159 skb->protocol = eth_type_trans(skb, netdev); in mtk_poll_rx()
2164 if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) && in mtk_poll_rx()
2168 if (port < ARRAY_SIZE(eth->dsa_meta) && in mtk_poll_rx()
2169 eth->dsa_meta[port]) in mtk_poll_rx()
2170 skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst); in mtk_poll_rx()
2174 mtk_ppe_check_skb(eth->ppe[0], skb, hash); in mtk_poll_rx()
2180 ring->data[idx] = new_data; in mtk_poll_rx()
2181 rxd->rxd1 = (unsigned int)dma_addr; in mtk_poll_rx()
2183 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_poll_rx()
2184 rxd->rxd2 = RX_DMA_LSO; in mtk_poll_rx()
2186 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); in mtk_poll_rx()
2188 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) && in mtk_poll_rx()
2190 rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr); in mtk_poll_rx()
2192 ring->calc_idx = idx; in mtk_poll_rx()
2202 mtk_update_rx_cpu_idx(eth); in mtk_poll_rx()
2205 eth->rx_packets += done; in mtk_poll_rx()
2206 eth->rx_bytes += bytes; in mtk_poll_rx()
2207 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes, in mtk_poll_rx()
2209 net_dim(&eth->rx_dim, dim_sample); in mtk_poll_rx()
2225 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac, in mtk_poll_tx_done() argument
2230 unsigned int bytes = skb->len; in mtk_poll_tx_done()
2232 state->total++; in mtk_poll_tx_done()
2233 eth->tx_packets++; in mtk_poll_tx_done()
2234 eth->tx_bytes += bytes; in mtk_poll_tx_done()
2236 dev = eth->netdev[mac]; in mtk_poll_tx_done()
2241 if (state->txq == txq) { in mtk_poll_tx_done()
2242 state->done++; in mtk_poll_tx_done()
2243 state->bytes += bytes; in mtk_poll_tx_done()
2247 if (state->txq) in mtk_poll_tx_done()
2248 netdev_tx_completed_queue(state->txq, state->done, state->bytes); in mtk_poll_tx_done()
2250 state->txq = txq; in mtk_poll_tx_done()
2251 state->done = 1; in mtk_poll_tx_done()
2252 state->bytes = bytes; in mtk_poll_tx_done()
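/* Sketch: the flushes in mtk_poll_tx_done() pair with the
 * netdev_tx_sent_queue() call in mtk_tx_map() (line 1495 above) to keep
 * byte queue limits (BQL) accurate per hardware queue.
 */
static void example_bql_complete(struct netdev_queue *txq,
				 unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(txq, pkts, bytes);
}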
2255 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, in mtk_poll_tx_qdma() argument
2258 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_poll_tx_qdma()
2259 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx_qdma()
2265 cpu = ring->last_free_ptr; in mtk_poll_tx_qdma()
2266 dma = mtk_r32(eth, reg_map->qdma.drx_ptr); in mtk_poll_tx_qdma()
2272 u32 next_cpu = desc->txd2; in mtk_poll_tx_qdma()
2274 desc = mtk_qdma_phys_to_virt(ring, desc->txd2); in mtk_poll_tx_qdma()
2275 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0) in mtk_poll_tx_qdma()
2279 eth->soc->txrx.txd_size); in mtk_poll_tx_qdma()
2280 if (!tx_buf->data) in mtk_poll_tx_qdma()
2283 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { in mtk_poll_tx_qdma()
2284 if (tx_buf->type == MTK_TYPE_SKB) in mtk_poll_tx_qdma()
2285 mtk_poll_tx_done(eth, state, tx_buf->mac_id, in mtk_poll_tx_qdma()
2286 tx_buf->data); in mtk_poll_tx_qdma()
2288 budget--; in mtk_poll_tx_qdma()
2290 mtk_tx_unmap(eth, tx_buf, &bq, true); in mtk_poll_tx_qdma()
2292 ring->last_free = desc; in mtk_poll_tx_qdma()
2293 atomic_inc(&ring->free_count); in mtk_poll_tx_qdma()
2299 ring->last_free_ptr = cpu; in mtk_poll_tx_qdma()
2300 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr); in mtk_poll_tx_qdma()
2305 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, in mtk_poll_tx_pdma() argument
2308 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx_pdma()
2314 cpu = ring->cpu_idx; in mtk_poll_tx_pdma()
2315 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0); in mtk_poll_tx_pdma()
2319 tx_buf = &ring->buf[cpu]; in mtk_poll_tx_pdma()
2320 if (!tx_buf->data) in mtk_poll_tx_pdma()
2323 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { in mtk_poll_tx_pdma()
2324 if (tx_buf->type == MTK_TYPE_SKB) in mtk_poll_tx_pdma()
2325 mtk_poll_tx_done(eth, state, 0, tx_buf->data); in mtk_poll_tx_pdma()
2326 budget--; in mtk_poll_tx_pdma()
2328 mtk_tx_unmap(eth, tx_buf, &bq, true); in mtk_poll_tx_pdma()
2330 desc = ring->dma + cpu * eth->soc->txrx.txd_size; in mtk_poll_tx_pdma()
2331 ring->last_free = desc; in mtk_poll_tx_pdma()
2332 atomic_inc(&ring->free_count); in mtk_poll_tx_pdma()
2334 cpu = NEXT_DESP_IDX(cpu, ring->dma_size); in mtk_poll_tx_pdma()
2338 ring->cpu_idx = cpu; in mtk_poll_tx_pdma()
2343 static int mtk_poll_tx(struct mtk_eth *eth, int budget) in mtk_poll_tx() argument
2345 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx()
2349 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_poll_tx()
2350 budget = mtk_poll_tx_qdma(eth, budget, &state); in mtk_poll_tx()
2352 budget = mtk_poll_tx_pdma(eth, budget, &state); in mtk_poll_tx()
2357 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes, in mtk_poll_tx()
2359 net_dim(&eth->tx_dim, dim_sample); in mtk_poll_tx()
2361 if (mtk_queue_stopped(eth) && in mtk_poll_tx()
2362 (atomic_read(&ring->free_count) > ring->thresh)) in mtk_poll_tx()
2363 mtk_wake_queue(eth); in mtk_poll_tx()
2368 static void mtk_handle_status_irq(struct mtk_eth *eth) in mtk_handle_status_irq() argument
2370 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2); in mtk_handle_status_irq()
2373 mtk_stats_update(eth); in mtk_handle_status_irq()
2374 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF), in mtk_handle_status_irq()
2381 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); in mtk_napi_tx() local
2382 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_napi_tx()
2385 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_napi_tx()
2386 mtk_handle_status_irq(eth); in mtk_napi_tx()
2387 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status); in mtk_napi_tx()
2388 tx_done = mtk_poll_tx(eth, budget); in mtk_napi_tx()
2390 if (unlikely(netif_msg_intr(eth))) { in mtk_napi_tx()
2391 dev_info(eth->dev, in mtk_napi_tx()
2393 mtk_r32(eth, reg_map->tx_irq_status), in mtk_napi_tx()
2394 mtk_r32(eth, reg_map->tx_irq_mask)); in mtk_napi_tx()
2400 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) in mtk_napi_tx()
2404 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_napi_tx()
2411 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); in mtk_napi_rx() local
2412 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_napi_rx()
2415 mtk_handle_status_irq(eth); in mtk_napi_rx()
2420 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, in mtk_napi_rx()
2421 reg_map->pdma.irq_status); in mtk_napi_rx()
2422 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); in mtk_napi_rx()
2425 if (unlikely(netif_msg_intr(eth))) { in mtk_napi_rx()
2426 dev_info(eth->dev, in mtk_napi_rx()
2428 mtk_r32(eth, reg_map->pdma.irq_status), in mtk_napi_rx()
2429 mtk_r32(eth, reg_map->pdma.irq_mask)); in mtk_napi_rx()
2435 } while (mtk_r32(eth, reg_map->pdma.irq_status) & in mtk_napi_rx()
2436 eth->soc->txrx.rx_irq_done_mask); in mtk_napi_rx()
2439 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_napi_rx()
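/* Sketch: the two pollers above run as separate NAPI contexts on the
 * dummy net_device; a registration along these lines is assumed, not
 * matched source.
 */
static void example_register_napi(struct mtk_eth *eth)
{
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
	netif_napi_add_tx(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
}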
2444 static int mtk_tx_alloc(struct mtk_eth *eth) in mtk_tx_alloc() argument
2446 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_alloc()
2447 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_tx_alloc()
2448 int i, sz = soc->txrx.txd_size; in mtk_tx_alloc()
2453 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) in mtk_tx_alloc()
2458 ring->buf = kcalloc(ring_size, sizeof(*ring->buf), in mtk_tx_alloc()
2460 if (!ring->buf) in mtk_tx_alloc()
2463 if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) { in mtk_tx_alloc()
2464 ring->dma = eth->sram_base + ring_size * sz; in mtk_tx_alloc()
2465 ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz; in mtk_tx_alloc()
2467 ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, in mtk_tx_alloc()
2468 &ring->phys, GFP_KERNEL); in mtk_tx_alloc()
2471 if (!ring->dma) in mtk_tx_alloc()
2476 u32 next_ptr = ring->phys + next * sz; in mtk_tx_alloc()
2478 txd = ring->dma + i * sz; in mtk_tx_alloc()
2479 txd->txd2 = next_ptr; in mtk_tx_alloc()
2480 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; in mtk_tx_alloc()
2481 txd->txd4 = 0; in mtk_tx_alloc()
2482 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_tx_alloc()
2483 txd->txd5 = 0; in mtk_tx_alloc()
2484 txd->txd6 = 0; in mtk_tx_alloc()
2485 txd->txd7 = 0; in mtk_tx_alloc()
2486 txd->txd8 = 0; in mtk_tx_alloc()
2490 /* On MT7688 (PDMA only) this driver uses the ring->dma structs in mtk_tx_alloc()
2491 * to store the hardware state of the TX queue and uses the in mtk_tx_alloc()
2492 * descriptors in ring->dma_pdma. in mtk_tx_alloc()
2494 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_tx_alloc()
2495 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, in mtk_tx_alloc()
2496 &ring->phys_pdma, GFP_KERNEL); in mtk_tx_alloc()
2497 if (!ring->dma_pdma) in mtk_tx_alloc()
2501 ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF; in mtk_tx_alloc()
2502 ring->dma_pdma[i].txd4 = 0; in mtk_tx_alloc()
2506 ring->dma_size = ring_size; in mtk_tx_alloc()
2507 atomic_set(&ring->free_count, ring_size - 2); in mtk_tx_alloc()
2508 ring->next_free = ring->dma; in mtk_tx_alloc()
2509 ring->last_free = (void *)txd; in mtk_tx_alloc()
2510 ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz)); in mtk_tx_alloc()
2511 ring->thresh = MAX_SKB_FRAGS; in mtk_tx_alloc()
2518 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_tx_alloc()
2519 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr); in mtk_tx_alloc()
2520 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr); in mtk_tx_alloc()
2521 mtk_w32(eth, in mtk_tx_alloc()
2522 ring->phys + ((ring_size - 1) * sz), in mtk_tx_alloc()
2523 soc->reg_map->qdma.crx_ptr); in mtk_tx_alloc()
2524 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr); in mtk_tx_alloc()
2528 mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs); in mtk_tx_alloc()
2535 if (mtk_is_netsys_v1(eth)) in mtk_tx_alloc()
2537 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); in mtk_tx_alloc()
2541 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate); in mtk_tx_alloc()
2542 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_tx_alloc()
2543 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4); in mtk_tx_alloc()
2545 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); in mtk_tx_alloc()
2546 mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0); in mtk_tx_alloc()
2547 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0); in mtk_tx_alloc()
2548 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx); in mtk_tx_alloc()
2554 return -ENOMEM; in mtk_tx_alloc()
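/* Sketch of the invariant established above: next_free chases last_free
 * through the txd2 links, so two descriptors stay reserved and
 * free_count starts at ring_size - 2. A fullness check then looks like:
 */
static bool example_tx_ring_full(struct mtk_tx_ring *ring)
{
	return ring->next_free == ring->last_free ||
	       atomic_read(&ring->free_count) <= 0;
}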
2557 static void mtk_tx_clean(struct mtk_eth *eth) in mtk_tx_clean() argument
2559 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_clean()
2560 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_tx_clean()
2563 if (ring->buf) { in mtk_tx_clean()
2564 for (i = 0; i < ring->dma_size; i++) in mtk_tx_clean()
2565 mtk_tx_unmap(eth, &ring->buf[i], NULL, false); in mtk_tx_clean()
2566 kfree(ring->buf); in mtk_tx_clean()
2567 ring->buf = NULL; in mtk_tx_clean()
2569 if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) { in mtk_tx_clean()
2570 dma_free_coherent(eth->dma_dev, in mtk_tx_clean()
2571 ring->dma_size * soc->txrx.txd_size, in mtk_tx_clean()
2572 ring->dma, ring->phys); in mtk_tx_clean()
2573 ring->dma = NULL; in mtk_tx_clean()
2576 if (ring->dma_pdma) { in mtk_tx_clean()
2577 dma_free_coherent(eth->dma_dev, in mtk_tx_clean()
2578 ring->dma_size * soc->txrx.txd_size, in mtk_tx_clean()
2579 ring->dma_pdma, ring->phys_pdma); in mtk_tx_clean()
2580 ring->dma_pdma = NULL; in mtk_tx_clean()
2584 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) in mtk_rx_alloc() argument
2586 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_rx_alloc()
2591 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_rx_alloc()
2598 return -EINVAL; in mtk_rx_alloc()
2599 ring = &eth->rx_ring_qdma; in mtk_rx_alloc()
2601 ring = &eth->rx_ring[ring_no]; in mtk_rx_alloc()
2612 ring->frag_size = mtk_max_frag_size(rx_data_len); in mtk_rx_alloc()
2613 ring->buf_size = mtk_max_buf_size(ring->frag_size); in mtk_rx_alloc()
2614 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data), in mtk_rx_alloc()
2616 if (!ring->data) in mtk_rx_alloc()
2617 return -ENOMEM; in mtk_rx_alloc()
2619 if (mtk_page_pool_enabled(eth)) { in mtk_rx_alloc()
2622 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no, in mtk_rx_alloc()
2627 ring->page_pool = pp; in mtk_rx_alloc()
2630 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) || in mtk_rx_alloc()
2632 ring->dma = dma_alloc_coherent(eth->dma_dev, in mtk_rx_alloc()
2633 rx_dma_size * eth->soc->txrx.rxd_size, in mtk_rx_alloc()
2634 &ring->phys, GFP_KERNEL); in mtk_rx_alloc()
2636 struct mtk_tx_ring *tx_ring = &eth->tx_ring; in mtk_rx_alloc()
2638 ring->dma = tx_ring->dma + tx_ring_size * in mtk_rx_alloc()
2639 eth->soc->txrx.txd_size * (ring_no + 1); in mtk_rx_alloc()
2640 ring->phys = tx_ring->phys + tx_ring_size * in mtk_rx_alloc()
2641 eth->soc->txrx.txd_size * (ring_no + 1); in mtk_rx_alloc()
2644 if (!ring->dma) in mtk_rx_alloc()
2645 return -ENOMEM; in mtk_rx_alloc()
2652 rxd = ring->dma + i * eth->soc->txrx.rxd_size; in mtk_rx_alloc()
2653 if (ring->page_pool) { in mtk_rx_alloc()
2654 data = mtk_page_pool_get_buff(ring->page_pool, in mtk_rx_alloc()
2657 return -ENOMEM; in mtk_rx_alloc()
2659 if (ring->frag_size <= PAGE_SIZE) in mtk_rx_alloc()
2660 data = netdev_alloc_frag(ring->frag_size); in mtk_rx_alloc()
2665 return -ENOMEM; in mtk_rx_alloc()
2667 dma_addr = dma_map_single(eth->dma_dev, in mtk_rx_alloc()
2668 data + NET_SKB_PAD + eth->ip_align, in mtk_rx_alloc()
2669 ring->buf_size, DMA_FROM_DEVICE); in mtk_rx_alloc()
2670 if (unlikely(dma_mapping_error(eth->dma_dev, in mtk_rx_alloc()
2673 return -ENOMEM; in mtk_rx_alloc()
2676 rxd->rxd1 = (unsigned int)dma_addr; in mtk_rx_alloc()
2677 ring->data[i] = data; in mtk_rx_alloc()
2679 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_rx_alloc()
2680 rxd->rxd2 = RX_DMA_LSO; in mtk_rx_alloc()
2682 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); in mtk_rx_alloc()
2684 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_rx_alloc()
2685 rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr); in mtk_rx_alloc()
2687 rxd->rxd3 = 0; in mtk_rx_alloc()
2688 rxd->rxd4 = 0; in mtk_rx_alloc()
2689 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_rx_alloc()
2690 rxd->rxd5 = 0; in mtk_rx_alloc()
2691 rxd->rxd6 = 0; in mtk_rx_alloc()
2692 rxd->rxd7 = 0; in mtk_rx_alloc()
2693 rxd->rxd8 = 0; in mtk_rx_alloc()
2697 ring->dma_size = rx_dma_size; in mtk_rx_alloc()
2698 ring->calc_idx_update = false; in mtk_rx_alloc()
2699 ring->calc_idx = rx_dma_size - 1; in mtk_rx_alloc()
2701 ring->crx_idx_reg = reg_map->qdma.qcrx_ptr + in mtk_rx_alloc()
2704 ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + in mtk_rx_alloc()
2712 mtk_w32(eth, ring->phys, in mtk_rx_alloc()
2713 reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET); in mtk_rx_alloc()
2714 mtk_w32(eth, rx_dma_size, in mtk_rx_alloc()
2715 reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET); in mtk_rx_alloc()
2716 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), in mtk_rx_alloc()
2717 reg_map->qdma.rst_idx); in mtk_rx_alloc()
2719 mtk_w32(eth, ring->phys, in mtk_rx_alloc()
2720 reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET); in mtk_rx_alloc()
2721 mtk_w32(eth, rx_dma_size, in mtk_rx_alloc()
2722 reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET); in mtk_rx_alloc()
2723 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), in mtk_rx_alloc()
2724 reg_map->pdma.rst_idx); in mtk_rx_alloc()
2726 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_rx_alloc()
2731 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram) in mtk_rx_clean() argument
2736 if (ring->data && ring->dma) { in mtk_rx_clean()
2737 for (i = 0; i < ring->dma_size; i++) { in mtk_rx_clean()
2740 if (!ring->data[i]) in mtk_rx_clean()
2743 rxd = ring->dma + i * eth->soc->txrx.rxd_size; in mtk_rx_clean()
2744 if (!rxd->rxd1) in mtk_rx_clean()
2747 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_rx_clean()
2748 addr64 = RX_DMA_GET_ADDR64(rxd->rxd2); in mtk_rx_clean()
2750 dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64), in mtk_rx_clean()
2751 ring->buf_size, DMA_FROM_DEVICE); in mtk_rx_clean()
2752 mtk_rx_put_buff(ring, ring->data[i], false); in mtk_rx_clean()
2754 kfree(ring->data); in mtk_rx_clean()
2755 ring->data = NULL; in mtk_rx_clean()
2758 if (!in_sram && ring->dma) { in mtk_rx_clean()
2759 dma_free_coherent(eth->dma_dev, in mtk_rx_clean()
2760 ring->dma_size * eth->soc->txrx.rxd_size, in mtk_rx_clean()
2761 ring->dma, ring->phys); in mtk_rx_clean()
2762 ring->dma = NULL; in mtk_rx_clean()
2765 if (ring->page_pool) { in mtk_rx_clean()
2766 if (xdp_rxq_info_is_reg(&ring->xdp_q)) in mtk_rx_clean()
2767 xdp_rxq_info_unreg(&ring->xdp_q); in mtk_rx_clean()
2768 page_pool_destroy(ring->page_pool); in mtk_rx_clean()
2769 ring->page_pool = NULL; in mtk_rx_clean()
2773 static int mtk_hwlro_rx_init(struct mtk_eth *eth) in mtk_hwlro_rx_init() argument
2779 /* set LRO rings to auto-learn modes */ in mtk_hwlro_rx_init()
2797 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i)); in mtk_hwlro_rx_init()
2798 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i)); in mtk_hwlro_rx_init()
2799 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i)); in mtk_hwlro_rx_init()
2809 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2); in mtk_hwlro_rx_init()
2811 /* auto-learn score delta setting */ in mtk_hwlro_rx_init()
2812 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA); in mtk_hwlro_rx_init()
2815 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME, in mtk_hwlro_rx_init()
2827 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3); in mtk_hwlro_rx_init()
2828 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_init()
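/* HW LRO bring-up in short: each LRO ring is switched to auto-learn mode
 * through its per-ring control words, then the global knobs follow (the
 * MTK_HW_LRO_BW_THRE bandwidth threshold, the auto-learn replace-score
 * delta, and the aggregation timer unit/refresh time) before the feature
 * is enabled by the final CTRL_DW0/DW3 writes.
 */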
2833 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth) in mtk_hwlro_rx_uninit() argument
2839 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2843 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2853 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i)); in mtk_hwlro_rx_uninit()
2856 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2859 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip) in mtk_hwlro_val_ipaddr() argument
2863 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2866 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2868 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx)); in mtk_hwlro_val_ipaddr()
2871 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2874 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx) in mtk_hwlro_inval_ipaddr() argument
2878 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_inval_ipaddr()
2881 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_inval_ipaddr()
2883 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx)); in mtk_hwlro_inval_ipaddr()
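/* Both IP-address helpers above clear MTK_RING_MYIP_VLD before touching
 * MTK_LRO_DIP_DW0_CFG, and only mtk_hwlro_val_ipaddr() sets it again, so
 * a ring can never match against a half-written destination address.
 */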
2892 if (mac->hwlro_ip[i]) in mtk_hwlro_get_ip_cnt()
2903 (struct ethtool_rx_flow_spec *)&cmd->fs; in mtk_hwlro_add_ipaddr()
2905 struct mtk_eth *eth = mac->hw; in mtk_hwlro_add_ipaddr() local
2908 if ((fsp->flow_type != TCP_V4_FLOW) || in mtk_hwlro_add_ipaddr()
2909 (!fsp->h_u.tcp_ip4_spec.ip4dst) || in mtk_hwlro_add_ipaddr()
2910 (fsp->location > 1)) in mtk_hwlro_add_ipaddr()
2911 return -EINVAL; in mtk_hwlro_add_ipaddr()
2913 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst); in mtk_hwlro_add_ipaddr()
2914 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location; in mtk_hwlro_add_ipaddr()
2916 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac); in mtk_hwlro_add_ipaddr()
2918 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]); in mtk_hwlro_add_ipaddr()
2927 (struct ethtool_rx_flow_spec *)&cmd->fs; in mtk_hwlro_del_ipaddr()
2929 struct mtk_eth *eth = mac->hw; in mtk_hwlro_del_ipaddr() local
2932 if (fsp->location > 1) in mtk_hwlro_del_ipaddr()
2933 return -EINVAL; in mtk_hwlro_del_ipaddr()
2935 mac->hwlro_ip[fsp->location] = 0; in mtk_hwlro_del_ipaddr()
2936 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location; in mtk_hwlro_del_ipaddr()
2938 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac); in mtk_hwlro_del_ipaddr()
2940 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); in mtk_hwlro_del_ipaddr()
2948 struct mtk_eth *eth = mac->hw; in mtk_hwlro_netdev_disable() local
2952 mac->hwlro_ip[i] = 0; in mtk_hwlro_netdev_disable()
2953 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i; in mtk_hwlro_netdev_disable()
2955 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); in mtk_hwlro_netdev_disable()
2958 mac->hwlro_ip_cnt = 0; in mtk_hwlro_netdev_disable()
2966 (struct ethtool_rx_flow_spec *)&cmd->fs; in mtk_hwlro_get_fdir_entry()
2968 if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip)) in mtk_hwlro_get_fdir_entry()
2969 return -EINVAL; in mtk_hwlro_get_fdir_entry()
2972 fsp->flow_type = TCP_V4_FLOW; in mtk_hwlro_get_fdir_entry()
2973 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]); in mtk_hwlro_get_fdir_entry()
2974 fsp->m_u.tcp_ip4_spec.ip4dst = 0; in mtk_hwlro_get_fdir_entry()
2976 fsp->h_u.tcp_ip4_spec.ip4src = 0; in mtk_hwlro_get_fdir_entry()
2977 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff; in mtk_hwlro_get_fdir_entry()
2978 fsp->h_u.tcp_ip4_spec.psrc = 0; in mtk_hwlro_get_fdir_entry()
2979 fsp->m_u.tcp_ip4_spec.psrc = 0xffff; in mtk_hwlro_get_fdir_entry()
2980 fsp->h_u.tcp_ip4_spec.pdst = 0; in mtk_hwlro_get_fdir_entry()
2981 fsp->m_u.tcp_ip4_spec.pdst = 0xffff; in mtk_hwlro_get_fdir_entry()
2982 fsp->h_u.tcp_ip4_spec.tos = 0; in mtk_hwlro_get_fdir_entry()
2983 fsp->m_u.tcp_ip4_spec.tos = 0xff; in mtk_hwlro_get_fdir_entry()
2997 if (cnt == cmd->rule_cnt) in mtk_hwlro_get_fdir_all()
2998 return -EMSGSIZE; in mtk_hwlro_get_fdir_all()
3000 if (mac->hwlro_ip[i]) { in mtk_hwlro_get_fdir_all()
3006 cmd->rule_cnt = cnt; in mtk_hwlro_get_fdir_all()
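/* For reference, rules of the shape accepted by mtk_hwlro_add_ipaddr()
 * are installed from user space through ethtool's ntuple interface; an
 * illustrative invocation (interface name and address are placeholders):
 *
 *	ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.10 loc 0
 *
 * Only TCPv4 destination-IP matches at location 0 or 1 get through the
 * checks above.
 */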
3030 netdev_features_t diff = dev->features ^ features; in mtk_set_features()
3039 static int mtk_dma_busy_wait(struct mtk_eth *eth) in mtk_dma_busy_wait() argument
3045 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dma_busy_wait()
3046 reg = eth->soc->reg_map->qdma.glo_cfg; in mtk_dma_busy_wait()
3048 reg = eth->soc->reg_map->pdma.glo_cfg; in mtk_dma_busy_wait()
3050 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val, in mtk_dma_busy_wait()
3054 dev_err(eth->dev, "DMA init timeout\n"); in mtk_dma_busy_wait()
3059 static int mtk_dma_init(struct mtk_eth *eth) in mtk_dma_init() argument
3064 if (mtk_dma_busy_wait(eth)) in mtk_dma_init()
3065 return -EBUSY; in mtk_dma_init()
3067 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3071 err = mtk_init_fq_dma(eth); in mtk_dma_init()
3076 err = mtk_tx_alloc(eth); in mtk_dma_init()
3080 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3081 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); in mtk_dma_init()
3086 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); in mtk_dma_init()
3090 if (eth->hwlro) { in mtk_dma_init()
3092 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO); in mtk_dma_init()
3096 err = mtk_hwlro_rx_init(eth); in mtk_dma_init()
3101 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3105 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | in mtk_dma_init()
3106 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th); in mtk_dma_init()
3107 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred); in mtk_dma_init()
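/* mtk_dma_init() ordering matters: on QDMA parts the free-queue scratch
 * ring must exist before the TX rings, TX comes before RX, and the
 * optional rings follow (a dedicated QDMA RX ring, then the HW LRO rings
 * when eth->hwlro is set). The closing writes arm QDMA flow control with
 * drop-mode thresholds and zero the hred register.
 */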
3113 static void mtk_dma_free(struct mtk_eth *eth) in mtk_dma_free() argument
3115 const struct mtk_soc_data *soc = eth->soc; in mtk_dma_free()
3119 if (eth->netdev[i]) in mtk_dma_free()
3120 netdev_reset_queue(eth->netdev[i]); in mtk_dma_free()
3121 if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) { in mtk_dma_free()
3122 dma_free_coherent(eth->dma_dev, in mtk_dma_free()
3123 MTK_QDMA_RING_SIZE * soc->txrx.txd_size, in mtk_dma_free()
3124 eth->scratch_ring, eth->phy_scratch_ring); in mtk_dma_free()
3125 eth->scratch_ring = NULL; in mtk_dma_free()
3126 eth->phy_scratch_ring = 0; in mtk_dma_free()
3128 mtk_tx_clean(eth); in mtk_dma_free()
3129 mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM)); in mtk_dma_free()
3130 mtk_rx_clean(eth, &eth->rx_ring_qdma, false); in mtk_dma_free()
3132 if (eth->hwlro) { in mtk_dma_free()
3133 mtk_hwlro_rx_uninit(eth); in mtk_dma_free()
3135 mtk_rx_clean(eth, &eth->rx_ring[i], false); in mtk_dma_free()
3138 kfree(eth->scratch_head); in mtk_dma_free()
3141 static bool mtk_hw_reset_check(struct mtk_eth *eth) in mtk_hw_reset_check() argument
3143 u32 val = mtk_r32(eth, MTK_INT_STATUS2); in mtk_hw_reset_check()
3153 struct mtk_eth *eth = mac->hw; in mtk_tx_timeout() local
3155 if (test_bit(MTK_RESETTING, &eth->state)) in mtk_tx_timeout()
3158 if (!mtk_hw_reset_check(eth)) in mtk_tx_timeout()
3161 eth->netdev[mac->id]->stats.tx_errors++; in mtk_tx_timeout()
3162 netif_err(eth, tx_err, dev, "transmit timed out\n"); in mtk_tx_timeout()
3164 schedule_work(&eth->pending_work); in mtk_tx_timeout()
3169 struct mtk_eth *eth = _eth; in mtk_handle_irq_rx() local
3171 eth->rx_events++; in mtk_handle_irq_rx()
3172 if (likely(napi_schedule_prep(&eth->rx_napi))) { in mtk_handle_irq_rx()
3173 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_handle_irq_rx()
3174 __napi_schedule(&eth->rx_napi); in mtk_handle_irq_rx()
3182 struct mtk_eth *eth = _eth; in mtk_handle_irq_tx() local
3184 eth->tx_events++; in mtk_handle_irq_tx()
3185 if (likely(napi_schedule_prep(&eth->tx_napi))) { in mtk_handle_irq_tx()
3186 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_handle_irq_tx()
3187 __napi_schedule(&eth->tx_napi); in mtk_handle_irq_tx()
3195 struct mtk_eth *eth = _eth; in mtk_handle_irq() local
3196 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_handle_irq()
3198 if (mtk_r32(eth, reg_map->pdma.irq_mask) & in mtk_handle_irq()
3199 eth->soc->txrx.rx_irq_done_mask) { in mtk_handle_irq()
3200 if (mtk_r32(eth, reg_map->pdma.irq_status) & in mtk_handle_irq()
3201 eth->soc->txrx.rx_irq_done_mask) in mtk_handle_irq()
3204 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) { in mtk_handle_irq()
3205 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) in mtk_handle_irq()
3216 struct mtk_eth *eth = mac->hw; in mtk_poll_controller() local
3218 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_poll_controller()
3219 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_poll_controller()
3220 mtk_handle_irq_rx(eth->irq[2], dev); in mtk_poll_controller()
3221 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_poll_controller()
3222 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_poll_controller()
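/* Netpoll path: with both interrupt sources masked, the RX hard-irq
 * handler is called directly (which schedules NAPI) and the masks are
 * restored afterwards. This is only built for CONFIG_NET_POLL_CONTROLLER
 * kernels.
 */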
3226 static int mtk_start_dma(struct mtk_eth *eth) in mtk_start_dma() argument
3229 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_start_dma()
3232 err = mtk_dma_init(eth); in mtk_start_dma()
3234 mtk_dma_free(eth); in mtk_start_dma()
3238 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_start_dma()
3239 val = mtk_r32(eth, reg_map->qdma.glo_cfg); in mtk_start_dma()
3244 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_start_dma()
3250 mtk_w32(eth, val, reg_map->qdma.glo_cfg); in mtk_start_dma()
3252 mtk_w32(eth, in mtk_start_dma()
3255 reg_map->pdma.glo_cfg); in mtk_start_dma()
3257 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN | in mtk_start_dma()
3259 reg_map->pdma.glo_cfg); in mtk_start_dma()
3265 static void mtk_gdm_config(struct mtk_eth *eth, u32 config) in mtk_gdm_config() argument
3269 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_gdm_config()
3275 if (!eth->netdev[i]) in mtk_gdm_config()
3278 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); in mtk_gdm_config()
3288 if (netdev_uses_dsa(eth->netdev[i])) in mtk_gdm_config()
3291 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); in mtk_gdm_config()
3294 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); in mtk_gdm_config()
3295 mtk_w32(eth, 0, MTK_RST_GL); in mtk_gdm_config()
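/* mtk_gdm_config() rewrites each GMAC's GDMA_FWD_CFG word: MT7628 has no
 * GDM block and bails out early, ports without a netdev are skipped, and
 * DSA conduit ports get extra treatment in the netdev_uses_dsa() branch
 * (the special-tag bit itself sits in the elided lines). The closing
 * write pair pulses RST_GL_PSE to apply the new forwarding setup.
 */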
3303 dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK; in mtk_uses_dsa()
3312 struct mtk_eth *eth = mac->hw; in mtk_device_event() local
3336 if (s.base.speed == 0 || s.base.speed == ((__u32)-1)) in mtk_device_event()
3340 if (dp->index >= MTK_QDMA_NUM_QUEUES) in mtk_device_event()
3343 if (mac->speed > 0 && mac->speed <= s.base.speed) in mtk_device_event()
3346 mtk_set_queue_speed(eth, dp->index + 3, s.base.speed); in mtk_device_event()
3354 struct mtk_eth *eth = mac->hw; in mtk_open() local
3357 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); in mtk_open()
3365 if (!refcount_read(&eth->dma_refcnt)) { in mtk_open()
3366 const struct mtk_soc_data *soc = eth->soc; in mtk_open()
3370 err = mtk_start_dma(eth); in mtk_open()
3372 phylink_disconnect_phy(mac->phylink); in mtk_open()
3376 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_open()
3377 mtk_ppe_start(eth->ppe[i]); in mtk_open()
3379 gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe in mtk_open()
3381 mtk_gdm_config(eth, gdm_config); in mtk_open()
3383 napi_enable(&eth->tx_napi); in mtk_open()
3384 napi_enable(&eth->rx_napi); in mtk_open()
3385 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_open()
3386 mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask); in mtk_open()
3387 refcount_set(&eth->dma_refcnt, 1); in mtk_open()
3390 refcount_inc(&eth->dma_refcnt); in mtk_open()
3392 phylink_start(mac->phylink); in mtk_open()
3395 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_open()
3398 if (mtk_uses_dsa(dev) && !eth->prog) { in mtk_open()
3399 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { in mtk_open()
3400 struct metadata_dst *md_dst = eth->dsa_meta[i]; in mtk_open()
3408 return -ENOMEM; in mtk_open()
3410 md_dst->u.port_info.port_id = i; in mtk_open()
3411 eth->dsa_meta[i] = md_dst; in mtk_open()
3417 u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); in mtk_open()
3420 mtk_w32(eth, val, MTK_CDMP_IG_CTRL); in mtk_open()
3422 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL); in mtk_open()
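/* Open is reference counted: the first mtk_open() starts DMA, the PPEs,
 * GDM forwarding and NAPI and sets eth->dma_refcnt to 1; later opens
 * only bump the count. The metadata_dst array built above carries the
 * switch port id, which the RX path attaches to skbs so frames whose DSA
 * tag was consumed by hardware can still reach the right user port.
 */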
3428 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) in mtk_stop_dma() argument
3434 spin_lock_bh(&eth->page_lock); in mtk_stop_dma()
3435 val = mtk_r32(eth, glo_cfg); in mtk_stop_dma()
3436 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), in mtk_stop_dma()
3438 spin_unlock_bh(&eth->page_lock); in mtk_stop_dma()
3442 val = mtk_r32(eth, glo_cfg); in mtk_stop_dma()
3454 struct mtk_eth *eth = mac->hw; in mtk_stop() local
3457 phylink_stop(mac->phylink); in mtk_stop()
3461 phylink_disconnect_phy(mac->phylink); in mtk_stop()
3464 if (!refcount_dec_and_test(&eth->dma_refcnt)) in mtk_stop()
3467 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); in mtk_stop()
3469 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_stop()
3470 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_stop()
3471 napi_disable(&eth->tx_napi); in mtk_stop()
3472 napi_disable(&eth->rx_napi); in mtk_stop()
3474 cancel_work_sync(&eth->rx_dim.work); in mtk_stop()
3475 cancel_work_sync(&eth->tx_dim.work); in mtk_stop()
3477 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_stop()
3478 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg); in mtk_stop()
3479 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg); in mtk_stop()
3481 mtk_dma_free(eth); in mtk_stop()
3483 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_stop()
3484 mtk_ppe_stop(eth->ppe[i]); in mtk_stop()
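/* mtk_stop() mirrors mtk_open(): only the last closer (dma_refcnt
 * reaching zero) flips GDM to drop-all, masks both interrupt directions,
 * disables NAPI, cancels the DIM workers, halts QDMA and PDMA, frees the
 * rings and finally stops the PPEs.
 */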
3493 struct mtk_eth *eth = mac->hw; in mtk_xdp_setup() local
3497 if (eth->hwlro) { in mtk_xdp_setup()
3499 return -EOPNOTSUPP; in mtk_xdp_setup()
3502 if (dev->mtu > MTK_PP_MAX_BUF_SIZE) { in mtk_xdp_setup()
3504 return -EOPNOTSUPP; in mtk_xdp_setup()
3507 need_update = !!eth->prog != !!prog; in mtk_xdp_setup()
3511 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held()); in mtk_xdp_setup()
3523 switch (xdp->command) { in mtk_xdp()
3525 return mtk_xdp_setup(dev, xdp->prog, xdp->extack); in mtk_xdp()
3527 return -EINVAL; in mtk_xdp()
3531 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) in ethsys_reset() argument
3533 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
3538 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
3544 static void mtk_clk_disable(struct mtk_eth *eth) in mtk_clk_disable() argument
3548 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--) in mtk_clk_disable()
3549 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_disable()
3552 static int mtk_clk_enable(struct mtk_eth *eth) in mtk_clk_enable() argument
3557 ret = clk_prepare_enable(eth->clks[clk]); in mtk_clk_enable()
3565 while (--clk >= 0) in mtk_clk_enable()
3566 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_enable()
3574 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim); in mtk_dim_rx() local
3575 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_dim_rx()
3579 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode, in mtk_dim_rx()
3580 dim->profile_ix); in mtk_dim_rx()
3581 spin_lock_bh(&eth->dim_lock); in mtk_dim_rx()
3583 val = mtk_r32(eth, reg_map->pdma.delay_irq); in mtk_dim_rx()
3593 mtk_w32(eth, val, reg_map->pdma.delay_irq); in mtk_dim_rx()
3594 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_rx()
3595 mtk_w32(eth, val, reg_map->qdma.delay_irq); in mtk_dim_rx()
3597 spin_unlock_bh(&eth->dim_lock); in mtk_dim_rx()
3599 dim->state = DIM_START_MEASURE; in mtk_dim_rx()
3605 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim); in mtk_dim_tx() local
3606 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_dim_tx()
3610 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode, in mtk_dim_tx()
3611 dim->profile_ix); in mtk_dim_tx()
3612 spin_lock_bh(&eth->dim_lock); in mtk_dim_tx()
3614 val = mtk_r32(eth, reg_map->pdma.delay_irq); in mtk_dim_tx()
3624 mtk_w32(eth, val, reg_map->pdma.delay_irq); in mtk_dim_tx()
3625 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_tx()
3626 mtk_w32(eth, val, reg_map->qdma.delay_irq); in mtk_dim_tx()
3628 spin_unlock_bh(&eth->dim_lock); in mtk_dim_tx()
3630 dim->state = DIM_START_MEASURE; in mtk_dim_tx()
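/* Both DIM workers follow the same pattern: fetch the moderation profile
 * net_dim picked for the observed traffic, rewrite the PDMA (and, on
 * QDMA parts, the QDMA) delay_irq register under dim_lock, and re-arm
 * sampling with DIM_START_MEASURE. RX and TX own separate fields of
 * delay_irq, so the two workers do not step on each other.
 */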
3635 struct mtk_eth *eth = mac->hw; in mtk_set_mcr_max_rx() local
3638 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_set_mcr_max_rx()
3641 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); in mtk_set_mcr_max_rx()
3654 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); in mtk_set_mcr_max_rx()
3657 static void mtk_hw_reset(struct mtk_eth *eth) in mtk_hw_reset() argument
3661 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_hw_reset()
3662 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); in mtk_hw_reset()
3664 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_reset()
3667 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_reset()
3670 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_hw_reset()
3674 } else if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_hw_reset()
3677 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_reset()
3683 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val); in mtk_hw_reset()
3685 if (mtk_is_netsys_v3_or_greater(eth)) in mtk_hw_reset()
3686 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, in mtk_hw_reset()
3688 else if (mtk_is_netsys_v2_or_greater(eth)) in mtk_hw_reset()
3689 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, in mtk_hw_reset()
3693 static u32 mtk_hw_reset_read(struct mtk_eth *eth) in mtk_hw_reset_read() argument
3697 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val); in mtk_hw_reset_read()
3701 static void mtk_hw_warm_reset(struct mtk_eth *eth) in mtk_hw_warm_reset() argument
3705 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE, in mtk_hw_warm_reset()
3707 if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val, in mtk_hw_warm_reset()
3709 dev_err(eth->dev, "warm reset failed\n"); in mtk_hw_warm_reset()
3710 mtk_hw_reset(eth); in mtk_hw_warm_reset()
3714 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_warm_reset()
3716 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_warm_reset()
3718 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_hw_warm_reset()
3722 } else if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_hw_warm_reset()
3724 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_warm_reset()
3730 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask); in mtk_hw_warm_reset()
3733 val = mtk_hw_reset_read(eth); in mtk_hw_warm_reset()
3735 dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n", in mtk_hw_warm_reset()
3739 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask); in mtk_hw_warm_reset()
3742 val = mtk_hw_reset_read(eth); in mtk_hw_warm_reset()
3744 dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n", in mtk_hw_warm_reset()
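/* The warm reset is staged: if simply asserting RSTCTRL_FE times out,
 * fall back to a full mtk_hw_reset(). Otherwise the SoC-specific mask
 * (FE plus whatever PPE blocks this revision has) is asserted and then
 * deasserted, with a readback after each stage: stage0 expects the bits
 * set, stage1 expects them cleared again.
 */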
3748 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth) in mtk_hw_check_dma_hang() argument
3750 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_hw_check_dma_hang()
3758 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_hw_check_dma_hang()
3762 wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc); in mtk_hw_check_dma_hang()
3764 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204); in mtk_hw_check_dma_hang()
3767 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230); in mtk_hw_check_dma_hang()
3770 oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) && in mtk_hw_check_dma_hang()
3771 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) && in mtk_hw_check_dma_hang()
3772 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16))); in mtk_hw_check_dma_hang()
3774 if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) { in mtk_hw_check_dma_hang()
3775 if (++eth->reset.wdma_hang_count > 2) { in mtk_hw_check_dma_hang()
3776 eth->reset.wdma_hang_count = 0; in mtk_hw_check_dma_hang()
3783 qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234); in mtk_hw_check_dma_hang()
3784 qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308); in mtk_hw_check_dma_hang()
3786 gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0; in mtk_hw_check_dma_hang()
3787 gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0; in mtk_hw_check_dma_hang()
3788 gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1; in mtk_hw_check_dma_hang()
3789 gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1; in mtk_hw_check_dma_hang()
3790 gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24); in mtk_hw_check_dma_hang()
3791 gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64); in mtk_hw_check_dma_hang()
3796 if (++eth->reset.qdma_hang_count > 2) { in mtk_hw_check_dma_hang()
3797 eth->reset.qdma_hang_count = 0; in mtk_hw_check_dma_hang()
3804 oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0)); in mtk_hw_check_dma_hang()
3805 cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16)); in mtk_hw_check_dma_hang()
3806 adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) && in mtk_hw_check_dma_hang()
3807 !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6)); in mtk_hw_check_dma_hang()
3810 if (++eth->reset.adma_hang_count > 2) { in mtk_hw_check_dma_hang()
3811 eth->reset.adma_hang_count = 0; in mtk_hw_check_dma_hang()
3817 eth->reset.wdma_hang_count = 0; in mtk_hw_check_dma_hang()
3818 eth->reset.qdma_hang_count = 0; in mtk_hw_check_dma_hang()
3819 eth->reset.adma_hang_count = 0; in mtk_hw_check_dma_hang()
3821 eth->reset.wdidx = wdidx; in mtk_hw_check_dma_hang()
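/* Hang detection keeps three independent counters (WDMA, QDMA, ADMA); a
 * block is reported hung only after its condition holds on more than two
 * consecutive polls, and any healthy poll clears all three counters. The
 * WDMA index is cached in eth->reset.wdidx so "index did not move while
 * TX is busy and the CDM is full" is detectable on the next poll.
 */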
3829 struct mtk_eth *eth = container_of(del_work, struct mtk_eth, in mtk_hw_reset_monitor_work() local
3832 if (test_bit(MTK_RESETTING, &eth->state)) in mtk_hw_reset_monitor_work()
3836 if (mtk_hw_check_dma_hang(eth)) in mtk_hw_reset_monitor_work()
3837 schedule_work(&eth->pending_work); in mtk_hw_reset_monitor_work()
3840 schedule_delayed_work(&eth->reset.monitor_work, in mtk_hw_reset_monitor_work()
3844 static int mtk_hw_init(struct mtk_eth *eth, bool reset) in mtk_hw_init() argument
3848 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_hw_init()
3851 if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state)) in mtk_hw_init()
3855 pm_runtime_enable(eth->dev); in mtk_hw_init()
3856 pm_runtime_get_sync(eth->dev); in mtk_hw_init()
3858 ret = mtk_clk_enable(eth); in mtk_hw_init()
3863 if (eth->ethsys) in mtk_hw_init()
3864 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, in mtk_hw_init()
3865 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask); in mtk_hw_init()
3867 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_hw_init()
3868 ret = device_reset(eth->dev); in mtk_hw_init()
3870 dev_err(eth->dev, "MAC reset failed!\n"); in mtk_hw_init()
3875 mtk_dim_rx(&eth->rx_dim.work); in mtk_hw_init()
3876 mtk_dim_tx(&eth->tx_dim.work); in mtk_hw_init()
3879 mtk_tx_irq_disable(eth, ~0); in mtk_hw_init()
3880 mtk_rx_irq_disable(eth, ~0); in mtk_hw_init()
3888 mtk_hw_warm_reset(eth); in mtk_hw_init()
3890 mtk_hw_reset(eth); in mtk_hw_init()
3892 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_hw_init()
3894 val = mtk_r32(eth, MTK_FE_GLO_MISC); in mtk_hw_init()
3895 mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC); in mtk_hw_init()
3898 if (eth->pctl) { in mtk_hw_init()
3900 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); in mtk_hw_init()
3903 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); in mtk_hw_init()
3906 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); in mtk_hw_init()
3914 struct net_device *dev = eth->netdev[i]; in mtk_hw_init()
3919 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); in mtk_hw_init()
3921 dev->mtu + MTK_RX_ETH_HLEN); in mtk_hw_init()
3927 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); in mtk_hw_init()
3928 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); in mtk_hw_init()
3929 if (mtk_is_netsys_v1(eth)) { in mtk_hw_init()
3930 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); in mtk_hw_init()
3931 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL); in mtk_hw_init()
3933 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); in mtk_hw_init()
3937 mtk_dim_rx(&eth->rx_dim.work); in mtk_hw_init()
3938 mtk_dim_tx(&eth->tx_dim.work); in mtk_hw_init()
3941 mtk_tx_irq_disable(eth, ~0); in mtk_hw_init()
3942 mtk_rx_irq_disable(eth, ~0); in mtk_hw_init()
3945 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp); in mtk_hw_init()
3946 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4); in mtk_hw_init()
3947 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp); in mtk_hw_init()
3948 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4); in mtk_hw_init()
3949 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); in mtk_hw_init()
3951 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_init()
3953 mtk_w32(eth, 0x00000302, PSE_DROP_CFG); in mtk_hw_init()
3956 mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES); in mtk_hw_init()
3957 mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES); in mtk_hw_init()
3960 mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0)); in mtk_hw_init()
3967 mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i); in mtk_hw_init()
3968 } else if (!mtk_is_netsys_v1(eth)) { in mtk_hw_init()
3970 mtk_w32(eth, 0x00000300, PSE_DROP_CFG); in mtk_hw_init()
3973 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP); in mtk_hw_init()
3976 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2); in mtk_hw_init()
3979 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1)); in mtk_hw_init()
3980 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2)); in mtk_hw_init()
3981 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3)); in mtk_hw_init()
3982 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4)); in mtk_hw_init()
3983 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5)); in mtk_hw_init()
3984 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6)); in mtk_hw_init()
3985 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7)); in mtk_hw_init()
3986 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8)); in mtk_hw_init()
3989 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1)); in mtk_hw_init()
3990 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2)); in mtk_hw_init()
3991 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3)); in mtk_hw_init()
3992 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4)); in mtk_hw_init()
3993 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5)); in mtk_hw_init()
3994 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6)); in mtk_hw_init()
3995 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7)); in mtk_hw_init()
3996 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8)); in mtk_hw_init()
3999 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES); in mtk_hw_init()
4000 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES); in mtk_hw_init()
4001 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES); in mtk_hw_init()
4002 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES); in mtk_hw_init()
4003 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES); in mtk_hw_init()
4004 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES); in mtk_hw_init()
4011 pm_runtime_put_sync(eth->dev); in mtk_hw_init()
4012 pm_runtime_disable(eth->dev); in mtk_hw_init()
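/* mtk_hw_init() as excerpted: enable runtime PM and clocks, propagate DT
 * DMA coherency into ETHSYS, warm- or cold-reset the frame engine, force
 * every GMAC link-down by default (phylink raises it later), then
 * program the per-revision PSE input/output queue thresholds and the
 * interrupt grouping. The PSE_IQ_REV()/PSE_OQ_TH() constants appear to
 * be vendor-tuned watermarks with no symbolic names.
 */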
4018 static int mtk_hw_deinit(struct mtk_eth *eth) in mtk_hw_deinit() argument
4020 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state)) in mtk_hw_deinit()
4023 mtk_clk_disable(eth); in mtk_hw_deinit()
4025 pm_runtime_put_sync(eth->dev); in mtk_hw_deinit()
4026 pm_runtime_disable(eth->dev); in mtk_hw_deinit()
4034 struct mtk_eth *eth = mac->hw; in mtk_uninit() local
4036 phylink_disconnect_phy(mac->phylink); in mtk_uninit()
4037 mtk_tx_irq_disable(eth, ~0); in mtk_uninit()
4038 mtk_rx_irq_disable(eth, ~0); in mtk_uninit()
4045 struct mtk_eth *eth = mac->hw; in mtk_change_mtu() local
4047 if (rcu_access_pointer(eth->prog) && in mtk_change_mtu()
4050 return -EINVAL; in mtk_change_mtu()
4054 dev->mtu = new_mtu; in mtk_change_mtu()
4067 return phylink_mii_ioctl(mac->phylink, ifr, cmd); in mtk_do_ioctl()
4072 return -EOPNOTSUPP; in mtk_do_ioctl()
4075 static void mtk_prepare_for_reset(struct mtk_eth *eth) in mtk_prepare_for_reset() argument
4082 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID); in mtk_prepare_for_reset()
4084 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT); in mtk_prepare_for_reset()
4085 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_prepare_for_reset()
4087 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_prepare_for_reset()
4089 mtk_w32(eth, val, MTK_FE_GLO_CFG(i)); in mtk_prepare_for_reset()
4093 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_prepare_for_reset()
4094 mtk_ppe_prepare_reset(eth->ppe[i]); in mtk_prepare_for_reset()
4097 mtk_w32(eth, 0, MTK_FE_INT_ENABLE); in mtk_prepare_for_reset()
4101 val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK; in mtk_prepare_for_reset()
4102 mtk_w32(eth, val, MTK_MAC_MCR(i)); in mtk_prepare_for_reset()
4108 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); in mtk_pending_work() local
4114 set_bit(MTK_RESETTING, &eth->state); in mtk_pending_work()
4116 mtk_prepare_for_reset(eth); in mtk_pending_work()
4121 mtk_prepare_for_reset(eth); in mtk_pending_work()
4125 if (!eth->netdev[i] || !netif_running(eth->netdev[i])) in mtk_pending_work()
4128 mtk_stop(eth->netdev[i]); in mtk_pending_work()
4134 if (eth->dev->pins) in mtk_pending_work()
4135 pinctrl_select_state(eth->dev->pins->p, in mtk_pending_work()
4136 eth->dev->pins->default_state); in mtk_pending_work()
4137 mtk_hw_init(eth, true); in mtk_pending_work()
4141 if (!eth->netdev[i] || !test_bit(i, &restart)) in mtk_pending_work()
4144 if (mtk_open(eth->netdev[i])) { in mtk_pending_work()
4145 netif_alert(eth, ifup, eth->netdev[i], in mtk_pending_work()
4147 dev_close(eth->netdev[i]); in mtk_pending_work()
4153 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID); in mtk_pending_work()
4155 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT); in mtk_pending_work()
4156 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_pending_work()
4158 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_pending_work()
4161 mtk_w32(eth, val, MTK_FE_GLO_CFG(i)); in mtk_pending_work()
4164 clear_bit(MTK_RESETTING, &eth->state); in mtk_pending_work()
4171 static int mtk_free_dev(struct mtk_eth *eth) in mtk_free_dev() argument
4176 if (!eth->netdev[i]) in mtk_free_dev()
4178 free_netdev(eth->netdev[i]); in mtk_free_dev()
4181 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { in mtk_free_dev()
4182 if (!eth->dsa_meta[i]) in mtk_free_dev()
4184 metadata_dst_free(eth->dsa_meta[i]); in mtk_free_dev()
4190 static int mtk_unreg_dev(struct mtk_eth *eth) in mtk_unreg_dev() argument
4196 if (!eth->netdev[i]) in mtk_unreg_dev()
4198 mac = netdev_priv(eth->netdev[i]); in mtk_unreg_dev()
4199 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_unreg_dev()
4200 unregister_netdevice_notifier(&mac->device_notifier); in mtk_unreg_dev()
4201 unregister_netdev(eth->netdev[i]); in mtk_unreg_dev()
4207 static void mtk_sgmii_destroy(struct mtk_eth *eth) in mtk_sgmii_destroy() argument
4212 mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]); in mtk_sgmii_destroy()
4215 static int mtk_cleanup(struct mtk_eth *eth) in mtk_cleanup() argument
4217 mtk_sgmii_destroy(eth); in mtk_cleanup()
4218 mtk_unreg_dev(eth); in mtk_cleanup()
4219 mtk_free_dev(eth); in mtk_cleanup()
4220 cancel_work_sync(&eth->pending_work); in mtk_cleanup()
4221 cancel_delayed_work_sync(&eth->reset.monitor_work); in mtk_cleanup()
4231 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) in mtk_get_link_ksettings()
4232 return -EBUSY; in mtk_get_link_ksettings()
4234 return phylink_ethtool_ksettings_get(mac->phylink, cmd); in mtk_get_link_ksettings()
4242 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) in mtk_set_link_ksettings()
4243 return -EBUSY; in mtk_set_link_ksettings()
4245 return phylink_ethtool_ksettings_set(mac->phylink, cmd); in mtk_set_link_ksettings()
4253 strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver)); in mtk_get_drvinfo()
4254 strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info)); in mtk_get_drvinfo()
4255 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats); in mtk_get_drvinfo()
4262 return mac->hw->msg_enable; in mtk_get_msglevel()
4269 mac->hw->msg_enable = value; in mtk_set_msglevel()
4276 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) in mtk_nway_reset()
4277 return -EBUSY; in mtk_nway_reset()
4279 if (!mac->phylink) in mtk_nway_reset()
4280 return -ENOTSUPP; in mtk_nway_reset()
4282 return phylink_ethtool_nway_reset(mac->phylink); in mtk_nway_reset()
4297 if (mtk_page_pool_enabled(mac->hw)) in mtk_get_strings()
4313 if (mtk_page_pool_enabled(mac->hw)) in mtk_get_sset_count()
4318 return -EOPNOTSUPP; in mtk_get_sset_count()
4322 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data) in mtk_ethtool_pp_stats() argument
4327 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) { in mtk_ethtool_pp_stats()
4328 struct mtk_rx_ring *ring = &eth->rx_ring[i]; in mtk_ethtool_pp_stats()
4330 if (!ring->page_pool) in mtk_ethtool_pp_stats()
4333 page_pool_get_stats(ring->page_pool, &stats); in mtk_ethtool_pp_stats()
4342 struct mtk_hw_stats *hwstats = mac->hw_stats; in mtk_get_ethtool_stats()
4347 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) in mtk_get_ethtool_stats()
4351 if (spin_trylock_bh(&hwstats->stats_lock)) { in mtk_get_ethtool_stats()
4353 spin_unlock_bh(&hwstats->stats_lock); in mtk_get_ethtool_stats()
4361 start = u64_stats_fetch_begin(&hwstats->syncp); in mtk_get_ethtool_stats()
4365 if (mtk_page_pool_enabled(mac->hw)) in mtk_get_ethtool_stats()
4366 mtk_ethtool_pp_stats(mac->hw, data_dst); in mtk_get_ethtool_stats()
4367 } while (u64_stats_fetch_retry(&hwstats->syncp, start)); in mtk_get_ethtool_stats()
4373 int ret = -EOPNOTSUPP; in mtk_get_rxnfc()
4375 switch (cmd->cmd) { in mtk_get_rxnfc()
4377 if (dev->hw_features & NETIF_F_LRO) { in mtk_get_rxnfc()
4378 cmd->data = MTK_MAX_RX_RING_NUM; in mtk_get_rxnfc()
4383 if (dev->hw_features & NETIF_F_LRO) { in mtk_get_rxnfc()
4386 cmd->rule_cnt = mac->hwlro_ip_cnt; in mtk_get_rxnfc()
4391 if (dev->hw_features & NETIF_F_LRO) in mtk_get_rxnfc()
4395 if (dev->hw_features & NETIF_F_LRO) in mtk_get_rxnfc()
4408 int ret = -EOPNOTSUPP; in mtk_set_rxnfc()
4410 switch (cmd->cmd) { in mtk_set_rxnfc()
4412 if (dev->hw_features & NETIF_F_LRO) in mtk_set_rxnfc()
4416 if (dev->hw_features & NETIF_F_LRO) in mtk_set_rxnfc()
4435 queue = mac->id; in mtk_select_queue()
4437 if (queue >= dev->num_tx_queues) in mtk_select_queue()
4480 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) in mtk_add_mac() argument
4491 dev_err(eth->dev, "missing mac id\n"); in mtk_add_mac()
4492 return -EINVAL; in mtk_add_mac()
4497 dev_err(eth->dev, "%d is not a valid mac id\n", id); in mtk_add_mac()
4498 return -EINVAL; in mtk_add_mac()
4501 if (eth->netdev[id]) { in mtk_add_mac()
4502 dev_err(eth->dev, "duplicate mac id found: %d\n", id); in mtk_add_mac()
4503 return -EINVAL; in mtk_add_mac()
4506 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_add_mac()
4509 eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1); in mtk_add_mac()
4510 if (!eth->netdev[id]) { in mtk_add_mac()
4511 dev_err(eth->dev, "alloc_etherdev failed\n"); in mtk_add_mac()
4512 return -ENOMEM; in mtk_add_mac()
4514 mac = netdev_priv(eth->netdev[id]); in mtk_add_mac()
4515 eth->mac[id] = mac; in mtk_add_mac()
4516 mac->id = id; in mtk_add_mac()
4517 mac->hw = eth; in mtk_add_mac()
4518 mac->of_node = np; in mtk_add_mac()
4520 err = of_get_ethdev_address(mac->of_node, eth->netdev[id]); in mtk_add_mac()
4521 if (err == -EPROBE_DEFER) in mtk_add_mac()
4526 eth_hw_addr_random(eth->netdev[id]); in mtk_add_mac()
4527 dev_err(eth->dev, "generated random MAC address %pM\n", in mtk_add_mac()
4528 eth->netdev[id]->dev_addr); in mtk_add_mac()
4531 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip)); in mtk_add_mac()
4532 mac->hwlro_ip_cnt = 0; in mtk_add_mac()
4534 mac->hw_stats = devm_kzalloc(eth->dev, in mtk_add_mac()
4535 sizeof(*mac->hw_stats), in mtk_add_mac()
4537 if (!mac->hw_stats) { in mtk_add_mac()
4538 dev_err(eth->dev, "failed to allocate counter memory\n"); in mtk_add_mac()
4539 err = -ENOMEM; in mtk_add_mac()
4542 spin_lock_init(&mac->hw_stats->stats_lock); in mtk_add_mac()
4543 u64_stats_init(&mac->hw_stats->syncp); in mtk_add_mac()
4545 if (mtk_is_netsys_v3_or_greater(eth)) in mtk_add_mac()
4546 mac->hw_stats->reg_offset = id * 0x80; in mtk_add_mac()
4548 mac->hw_stats->reg_offset = id * 0x40; in mtk_add_mac()
4553 dev_err(eth->dev, "incorrect phy-mode\n"); in mtk_add_mac()
4558 mac->interface = PHY_INTERFACE_MODE_NA; in mtk_add_mac()
4559 mac->speed = SPEED_UNKNOWN; in mtk_add_mac()
4561 mac->phylink_config.dev = &eth->netdev[id]->dev; in mtk_add_mac()
4562 mac->phylink_config.type = PHYLINK_NETDEV; in mtk_add_mac()
4563 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | in mtk_add_mac()
4566 /* MT7623 gmac0 is now missing its speed-specific PLL configuration in mtk_add_mac()
4567 * in its .mac_config method (since state->speed is not valid there). in mtk_add_mac()
4570 if (!mac->hw->soc->disable_pll_modes || mac->id != 0) { in mtk_add_mac()
4572 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4574 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4576 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) in mtk_add_mac()
4577 phy_interface_set_rgmii(mac->phylink_config.supported_interfaces); in mtk_add_mac()
4580 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id) in mtk_add_mac()
4582 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4585 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) && in mtk_add_mac()
4586 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) { in mtk_add_mac()
4587 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val); in mtk_add_mac()
4590 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4593 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) { in mtk_add_mac()
4595 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4597 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4599 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4602 if (mtk_is_netsys_v3_or_greater(mac->hw) && in mtk_add_mac()
4603 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) && in mtk_add_mac()
4605 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | in mtk_add_mac()
4608 phy_interface_zero(mac->phylink_config.supported_interfaces); in mtk_add_mac()
4610 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4613 phylink = phylink_create(&mac->phylink_config, in mtk_add_mac()
4614 of_fwnode_handle(mac->of_node), in mtk_add_mac()
4621 mac->phylink = phylink; in mtk_add_mac()
4623 SET_NETDEV_DEV(eth->netdev[id], eth->dev); in mtk_add_mac()
4624 eth->netdev[id]->watchdog_timeo = 5 * HZ; in mtk_add_mac()
4625 eth->netdev[id]->netdev_ops = &mtk_netdev_ops; in mtk_add_mac()
4626 eth->netdev[id]->base_addr = (unsigned long)eth->base; in mtk_add_mac()
4628 eth->netdev[id]->hw_features = eth->soc->hw_features; in mtk_add_mac()
4629 if (eth->hwlro) in mtk_add_mac()
4630 eth->netdev[id]->hw_features |= NETIF_F_LRO; in mtk_add_mac()
4632 eth->netdev[id]->vlan_features = eth->soc->hw_features & in mtk_add_mac()
4634 eth->netdev[id]->features |= eth->soc->hw_features; in mtk_add_mac()
4635 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; in mtk_add_mac()
4637 eth->netdev[id]->irq = eth->irq[0]; in mtk_add_mac()
4638 eth->netdev[id]->dev.of_node = np; in mtk_add_mac()
4640 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_add_mac()
4641 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; in mtk_add_mac()
4643 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; in mtk_add_mac()
4645 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_add_mac()
4646 mac->device_notifier.notifier_call = mtk_device_event; in mtk_add_mac()
4647 register_netdevice_notifier(&mac->device_notifier); in mtk_add_mac()
4650 if (mtk_page_pool_enabled(eth)) in mtk_add_mac()
4651 eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC | in mtk_add_mac()
4659 free_netdev(eth->netdev[id]); in mtk_add_mac()
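/* mtk_add_mac() builds one netdev per "mediatek,eth-mac" DT child: the
 * MAC id comes from the "reg" property, per-MAC stats are allocated, and
 * a phylink instance is created whose supported_interfaces set follows
 * the SoC capabilities (RGMII/TRGMII, SGMII/1000base-X/2500base-X, or
 * only the internal mode for the GMAC wired to a netsys v3 built-in
 * switch). Features, MTU limits and XDP support come from the SoC data.
 */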
4663 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev) in mtk_eth_set_dma_device() argument
4672 dev = eth->netdev[i]; in mtk_eth_set_dma_device()
4674 if (!dev || !(dev->flags & IFF_UP)) in mtk_eth_set_dma_device()
4677 list_add_tail(&dev->close_list, &dev_list); in mtk_eth_set_dma_device()
4682 eth->dma_dev = dma_dev; in mtk_eth_set_dma_device()
4685 list_del_init(&dev->close_list); in mtk_eth_set_dma_device()
4692 static int mtk_sgmii_init(struct mtk_eth *eth) in mtk_sgmii_init() argument
4700 np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i); in mtk_sgmii_init()
4714 eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap, in mtk_sgmii_init()
4715 eth->soc->ana_rgc3, in mtk_sgmii_init()
4726 struct mtk_eth *eth; in mtk_probe() local
4729 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); in mtk_probe()
4730 if (!eth) in mtk_probe()
4731 return -ENOMEM; in mtk_probe()
4733 eth->soc = of_device_get_match_data(&pdev->dev); in mtk_probe()
4735 eth->dev = &pdev->dev; in mtk_probe()
4736 eth->dma_dev = &pdev->dev; in mtk_probe()
4737 eth->base = devm_platform_ioremap_resource(pdev, 0); in mtk_probe()
4738 if (IS_ERR(eth->base)) in mtk_probe()
4739 return PTR_ERR(eth->base); in mtk_probe()
4741 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_probe()
4742 eth->ip_align = NET_IP_ALIGN; in mtk_probe()
4744 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) { in mtk_probe()
4749 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_probe()
4750 eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1); in mtk_probe()
4751 if (IS_ERR(eth->sram_base)) in mtk_probe()
4752 return PTR_ERR(eth->sram_base); in mtk_probe()
4754 eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET; in mtk_probe()
4758 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { in mtk_probe()
4759 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36)); in mtk_probe()
4761 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); in mtk_probe()
4764 dev_err(&pdev->dev, "Wrong DMA config\n"); in mtk_probe()
4765 return -EINVAL; in mtk_probe()
4769 spin_lock_init(&eth->page_lock); in mtk_probe()
4770 spin_lock_init(&eth->tx_irq_lock); in mtk_probe()
4771 spin_lock_init(&eth->rx_irq_lock); in mtk_probe()
4772 spin_lock_init(&eth->dim_lock); in mtk_probe()
4774 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
4775 INIT_WORK(&eth->rx_dim.work, mtk_dim_rx); in mtk_probe()
4776 INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work); in mtk_probe()
4778 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
4779 INIT_WORK(&eth->tx_dim.work, mtk_dim_tx); in mtk_probe()
4781 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
4782 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4784 if (IS_ERR(eth->ethsys)) { in mtk_probe()
4785 dev_err(&pdev->dev, "no ethsys regmap found\n"); in mtk_probe()
4786 return PTR_ERR(eth->ethsys); in mtk_probe()
4790 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) { in mtk_probe()
4791 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4793 if (IS_ERR(eth->infra)) { in mtk_probe()
4794 dev_err(&pdev->dev, "no infracfg regmap found\n"); in mtk_probe()
4795 return PTR_ERR(eth->infra); in mtk_probe()
4799 if (of_dma_is_coherent(pdev->dev.of_node)) { in mtk_probe()
4802 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4803 "cci-control-port"); in mtk_probe()
4809 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { in mtk_probe()
4810 err = mtk_sgmii_init(eth); in mtk_probe()
4816 if (eth->soc->required_pctl) { in mtk_probe()
4817 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4819 if (IS_ERR(eth->pctl)) { in mtk_probe()
4820 dev_err(&pdev->dev, "no pctl regmap found\n"); in mtk_probe()
4821 err = PTR_ERR(eth->pctl); in mtk_probe()
4826 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_probe()
4829 err = -EINVAL; in mtk_probe()
4832 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) { in mtk_probe()
4833 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_probe()
4836 err = -EINVAL; in mtk_probe()
4839 eth->phy_scratch_ring = res_sram->start; in mtk_probe()
4841 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET; in mtk_probe()
4846 if (eth->soc->offload_version) { in mtk_probe()
4852 if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base)) in mtk_probe()
4855 np = of_parse_phandle(pdev->dev.of_node, in mtk_probe()
4860 wdma_base = eth->soc->reg_map->wdma_base[i]; in mtk_probe()
4861 wdma_phy = res ? res->start + wdma_base : 0; in mtk_probe()
4862 mtk_wed_add_hw(np, eth, eth->base + wdma_base, in mtk_probe()
4868 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) in mtk_probe()
4869 eth->irq[i] = eth->irq[0]; in mtk_probe()
4871 eth->irq[i] = platform_get_irq(pdev, i); in mtk_probe()
4872 if (eth->irq[i] < 0) { in mtk_probe()
4873 dev_err(&pdev->dev, "no IRQ%d resource found\n", i); in mtk_probe()
4874 err = -ENXIO; in mtk_probe()
4878 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { in mtk_probe()
4879 eth->clks[i] = devm_clk_get(eth->dev, in mtk_probe()
4881 if (IS_ERR(eth->clks[i])) { in mtk_probe()
4882 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) { in mtk_probe()
4883 err = -EPROBE_DEFER; in mtk_probe()
4886 if (eth->soc->required_clks & BIT(i)) { in mtk_probe()
4887 dev_err(&pdev->dev, "clock %s not found\n", in mtk_probe()
4889 err = -EINVAL; in mtk_probe()
4892 eth->clks[i] = NULL; in mtk_probe()
4896 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); in mtk_probe()
4897 INIT_WORK(&eth->pending_work, mtk_pending_work); in mtk_probe()
4899 err = mtk_hw_init(eth, false); in mtk_probe()
4903 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); in mtk_probe()
4905 for_each_child_of_node(pdev->dev.of_node, mac_np) { in mtk_probe()
4907 "mediatek,eth-mac")) in mtk_probe()
4913 err = mtk_add_mac(eth, mac_np); in mtk_probe()
4920 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) { in mtk_probe()
4921 err = devm_request_irq(eth->dev, eth->irq[0], in mtk_probe()
4923 dev_name(eth->dev), eth); in mtk_probe()
4925 err = devm_request_irq(eth->dev, eth->irq[1], in mtk_probe()
4927 dev_name(eth->dev), eth); in mtk_probe()
4931 err = devm_request_irq(eth->dev, eth->irq[2], in mtk_probe()
4933 dev_name(eth->dev), eth); in mtk_probe()
4939 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
4940 err = mtk_mdio_init(eth); in mtk_probe()
4945 if (eth->soc->offload_version) { in mtk_probe()
4946 u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1; in mtk_probe()
4948 num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe); in mtk_probe()
4950 u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400; in mtk_probe()
4952 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i); in mtk_probe()
4954 if (!eth->ppe[i]) { in mtk_probe()
4955 err = -ENOMEM; in mtk_probe()
4960 err = mtk_eth_offload_init(eth); in mtk_probe()
4966 if (!eth->netdev[i]) in mtk_probe()
4969 err = register_netdev(eth->netdev[i]); in mtk_probe()
4971 dev_err(eth->dev, "error bringing up device\n"); in mtk_probe()
4974 netif_info(eth, probe, eth->netdev[i], in mtk_probe()
4976 eth->netdev[i]->base_addr, eth->irq[0]); in mtk_probe()
4982 init_dummy_netdev(&eth->dummy_dev); in mtk_probe()
4983 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx); in mtk_probe()
4984 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx); in mtk_probe()
4986 platform_set_drvdata(pdev, eth); in mtk_probe()
4987 schedule_delayed_work(&eth->reset.monitor_work, in mtk_probe()
4993 mtk_ppe_deinit(eth); in mtk_probe()
4994 mtk_mdio_cleanup(eth); in mtk_probe()
4996 mtk_free_dev(eth); in mtk_probe()
4998 mtk_hw_deinit(eth); in mtk_probe()
5002 mtk_sgmii_destroy(eth); in mtk_probe()
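/* Probe order, and hence the unwind order of the error labels above: map
 * MMIO (and SRAM), set the DMA mask, look up the syscon regmaps, create
 * the SGMII PCS instances, register WED hardware, collect IRQs and
 * clocks, run mtk_hw_init(), add one MAC per DT child, request the IRQ
 * handlers, init MDIO, bring up the PPEs and flow offload, and only then
 * register the netdevs, so a failed step never exposes a half-initialised
 * interface.
 */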
5009 struct mtk_eth *eth = platform_get_drvdata(pdev); in mtk_remove() local
5015 if (!eth->netdev[i]) in mtk_remove()
5017 mtk_stop(eth->netdev[i]); in mtk_remove()
5018 mac = netdev_priv(eth->netdev[i]); in mtk_remove()
5019 phylink_disconnect_phy(mac->phylink); in mtk_remove()
5023 mtk_hw_deinit(eth); in mtk_remove()
5025 netif_napi_del(&eth->tx_napi); in mtk_remove()
5026 netif_napi_del(&eth->rx_napi); in mtk_remove()
5027 mtk_cleanup(eth); in mtk_remove()
5028 mtk_mdio_cleanup(eth); in mtk_remove()
5216 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
5217 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
5218 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
5219 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
5220 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
5221 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
5222 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
5223 { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
5224 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
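/* A board selects this driver through one of the compatibles above, with
 * one "mediatek,eth-mac" child per GMAC (consumed by mtk_add_mac()). A
 * minimal illustrative fragment; node names, the unit address and the
 * phy-mode value are placeholders:
 *
 *	eth: ethernet@1b100000 {
 *		compatible = "mediatek,mt7622-eth";
 *		...
 *		gmac0: mac@0 {
 *			compatible = "mediatek,eth-mac";
 *			reg = <0>;
 *			phy-mode = "rgmii";
 *		};
 *	};
 */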