Lines matching refs: eth
282 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) in mtk_w32() argument
284 __raw_writel(val, eth->base + reg); in mtk_w32()
287 u32 mtk_r32(struct mtk_eth *eth, unsigned reg) in mtk_r32() argument
289 return __raw_readl(eth->base + reg); in mtk_r32()
292 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg) in mtk_m32() argument
296 val = mtk_r32(eth, reg); in mtk_m32()
299 mtk_w32(eth, val, reg); in mtk_m32()
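Note: lines 296/299 are the read and write halves of a read-modify-write helper; a minimal sketch of the full function, assuming the usual clear-then-set step between the two matched lines:

    u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
    {
        u32 val;

        val = mtk_r32(eth, reg);
        val &= ~mask;   /* clear the masked bits (assumed) */
        val |= set;     /* apply the new bits (assumed) */
        mtk_w32(eth, val, reg);

        return val;
    }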
303 static int mtk_mdio_busy_wait(struct mtk_eth *eth) in mtk_mdio_busy_wait() argument
308 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS)) in mtk_mdio_busy_wait()
315 dev_err(eth->dev, "mdio: MDIO timeout\n"); in mtk_mdio_busy_wait()
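Note: lines 308/315 imply a poll-until-idle loop on MTK_PHY_IAC; a sketch under that assumption (the jiffies deadline, cond_resched() call and -ETIMEDOUT return are guesses, not taken from the listing):

    static int mtk_mdio_busy_wait(struct mtk_eth *eth)
    {
        unsigned long deadline = jiffies + HZ;  /* hypothetical timeout */

        do {
            /* PHY_IAC_ACCESS clears once the MDIO engine is idle */
            if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
                return 0;
            cond_resched();
        } while (time_before(jiffies, deadline));

        dev_err(eth->dev, "mdio: MDIO timeout\n");
        return -ETIMEDOUT;
    }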
319 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg, in _mtk_mdio_write_c22() argument
324 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c22()
328 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c22()
336 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c22()
343 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr, in _mtk_mdio_write_c45() argument
348 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
352 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c45()
360 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
364 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c45()
372 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
379 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg) in _mtk_mdio_read_c22() argument
383 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c22()
387 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c22()
394 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c22()
398 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK; in _mtk_mdio_read_c22()
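Note: lines 383-398 outline the clause-22 read flow: wait for the engine, write one IAC command word, wait again, then read the data back from the same register. A sketch; the PHY_IAC_* field macros are assumed from the naming on the write side:

    static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
    {
        int ret;

        ret = mtk_mdio_busy_wait(eth);
        if (ret < 0)
            return ret;

        /* command word layout (START/CMD/REG/ADDR fields) assumed */
        mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C22 | PHY_IAC_CMD_C22_READ |
                PHY_IAC_REG(phy_reg) | PHY_IAC_ADDR(phy_addr), MTK_PHY_IAC);

        ret = mtk_mdio_busy_wait(eth);
        if (ret < 0)
            return ret;

        return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
    }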
401 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr, in _mtk_mdio_read_c45() argument
406 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
410 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c45()
418 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
422 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c45()
429 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
433 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK; in _mtk_mdio_read_c45()
439 struct mtk_eth *eth = bus->priv; in mtk_mdio_write_c22() local
441 return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val); in mtk_mdio_write_c22()
447 struct mtk_eth *eth = bus->priv; in mtk_mdio_write_c45() local
449 return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val); in mtk_mdio_write_c45()
454 struct mtk_eth *eth = bus->priv; in mtk_mdio_read_c22() local
456 return _mtk_mdio_read_c22(eth, phy_addr, phy_reg); in mtk_mdio_read_c22()
462 struct mtk_eth *eth = bus->priv; in mtk_mdio_read_c45() local
464 return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg); in mtk_mdio_read_c45()
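Note: lines 439-464 are the standard mii_bus wrapper pattern: each callback recovers the driver context from bus->priv and delegates to the matching _mtk_mdio_* helper. One representative pair, using the kernel's struct mii_bus read/write prototypes:

    static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
    {
        struct mtk_eth *eth = bus->priv;

        return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
    }

    static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
                                  int phy_reg, u16 val)
    {
        struct mtk_eth *eth = bus->priv;

        return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
    }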
467 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth, in mt7621_gmac0_rgmii_adjust() argument
475 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, in mt7621_gmac0_rgmii_adjust()
481 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, in mtk_gmac0_rgmii_adjust() argument
487 mtk_w32(eth, TRGMII_MODE, INTF_MODE); in mtk_gmac0_rgmii_adjust()
488 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000); in mtk_gmac0_rgmii_adjust()
490 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); in mtk_gmac0_rgmii_adjust()
494 dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n"); in mtk_gmac0_rgmii_adjust()
497 static void mtk_setup_bridge_switch(struct mtk_eth *eth) in mtk_setup_bridge_switch() argument
500 mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID), in mtk_setup_bridge_switch()
504 mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK, in mtk_setup_bridge_switch()
515 struct mtk_eth *eth = mac->hw; in mtk_mac_select_pcs() local
520 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? in mtk_mac_select_pcs()
523 return eth->sgmii_pcs[sid]; in mtk_mac_select_pcs()
534 struct mtk_eth *eth = mac->hw; in mtk_mac_config() local
539 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_mac_config()
549 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) { in mtk_mac_config()
550 err = mtk_gmac_rgmii_path_setup(eth, mac->id); in mtk_mac_config()
558 err = mtk_gmac_sgmii_path_setup(eth, mac->id); in mtk_mac_config()
563 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) { in mtk_mac_config()
564 err = mtk_gmac_gephy_path_setup(eth, mac->id); in mtk_mac_config()
612 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
615 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); in mtk_mac_config()
626 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
628 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_config()
635 dev_err(eth->dev, in mtk_mac_config()
641 if (mtk_is_netsys_v3_or_greater(eth) && in mtk_mac_config()
646 mtk_setup_bridge_switch(eth); in mtk_mac_config()
652 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__, in mtk_mac_config()
657 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__, in mtk_mac_config()
666 struct mtk_eth *eth = mac->hw; in mtk_mac_finish() local
672 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_finish()
699 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx, in mtk_set_queue_speed() argument
702 const struct mtk_soc_data *soc = eth->soc; in mtk_set_queue_speed()
713 if (mtk_is_netsys_v1(eth)) in mtk_set_queue_speed()
765 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); in mtk_set_queue_speed()
816 static int mtk_mdio_init(struct mtk_eth *eth) in mtk_mdio_init() argument
823 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); in mtk_mdio_init()
825 dev_err(eth->dev, "no %s child node found", "mdio-bus"); in mtk_mdio_init()
834 eth->mii_bus = devm_mdiobus_alloc(eth->dev); in mtk_mdio_init()
835 if (!eth->mii_bus) { in mtk_mdio_init()
840 eth->mii_bus->name = "mdio"; in mtk_mdio_init()
841 eth->mii_bus->read = mtk_mdio_read_c22; in mtk_mdio_init()
842 eth->mii_bus->write = mtk_mdio_write_c22; in mtk_mdio_init()
843 eth->mii_bus->read_c45 = mtk_mdio_read_c45; in mtk_mdio_init()
844 eth->mii_bus->write_c45 = mtk_mdio_write_c45; in mtk_mdio_init()
845 eth->mii_bus->priv = eth; in mtk_mdio_init()
846 eth->mii_bus->parent = eth->dev; in mtk_mdio_init()
848 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); in mtk_mdio_init()
852 dev_err(eth->dev, "MDIO clock frequency out of range"); in mtk_mdio_init()
861 if (mtk_is_netsys_v3_or_greater(eth)) in mtk_mdio_init()
862 mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3); in mtk_mdio_init()
866 if (!mtk_is_netsys_v3_or_greater(eth)) in mtk_mdio_init()
868 mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC); in mtk_mdio_init()
870 dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider); in mtk_mdio_init()
872 ret = of_mdiobus_register(eth->mii_bus, mii_np); in mtk_mdio_init()
879 static void mtk_mdio_cleanup(struct mtk_eth *eth) in mtk_mdio_cleanup() argument
881 if (!eth->mii_bus) in mtk_mdio_cleanup()
884 mdiobus_unregister(eth->mii_bus); in mtk_mdio_cleanup()
887 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask) in mtk_tx_irq_disable() argument
892 spin_lock_irqsave(&eth->tx_irq_lock, flags); in mtk_tx_irq_disable()
893 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_disable()
894 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_disable()
895 spin_unlock_irqrestore(&eth->tx_irq_lock, flags); in mtk_tx_irq_disable()
898 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask) in mtk_tx_irq_enable() argument
903 spin_lock_irqsave(&eth->tx_irq_lock, flags); in mtk_tx_irq_enable()
904 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_enable()
905 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_enable()
906 spin_unlock_irqrestore(&eth->tx_irq_lock, flags); in mtk_tx_irq_enable()
909 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask) in mtk_rx_irq_disable() argument
914 spin_lock_irqsave(&eth->rx_irq_lock, flags); in mtk_rx_irq_disable()
915 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_disable()
916 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_disable()
917 spin_unlock_irqrestore(&eth->rx_irq_lock, flags); in mtk_rx_irq_disable()
920 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask) in mtk_rx_irq_enable() argument
925 spin_lock_irqsave(&eth->rx_irq_lock, flags); in mtk_rx_irq_enable()
926 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_enable()
927 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_enable()
928 spin_unlock_irqrestore(&eth->rx_irq_lock, flags); in mtk_rx_irq_enable()
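Note: lines 887-928 are four symmetric helpers that update an interrupt-mask register under a spinlock. The RX-enable variant reconstructs almost verbatim from the matched lines; only the local declarations are filled in:

    static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
    {
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&eth->rx_irq_lock, flags);
        val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
        mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
        spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
    }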
935 struct mtk_eth *eth = mac->hw; in mtk_set_mac_address() local
945 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_set_mac_address()
966 struct mtk_eth *eth = mac->hw; in mtk_stats_update_mac() local
970 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_stats_update_mac()
978 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_stats_update_mac()
1001 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_stats_update_mac()
1031 static void mtk_stats_update(struct mtk_eth *eth) in mtk_stats_update() argument
1036 if (!eth->mac[i] || !eth->mac[i]->hw_stats) in mtk_stats_update()
1038 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) { in mtk_stats_update()
1039 mtk_stats_update_mac(eth->mac[i]); in mtk_stats_update()
1040 spin_unlock(&eth->mac[i]->hw_stats->stats_lock); in mtk_stats_update()
1099 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd, in mtk_rx_get_desc() argument
1109 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_rx_get_desc()
1129 static int mtk_init_fq_dma(struct mtk_eth *eth) in mtk_init_fq_dma() argument
1131 const struct mtk_soc_data *soc = eth->soc; in mtk_init_fq_dma()
1137 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) in mtk_init_fq_dma()
1138 eth->scratch_ring = eth->sram_base; in mtk_init_fq_dma()
1140 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, in mtk_init_fq_dma()
1142 &eth->phy_scratch_ring, in mtk_init_fq_dma()
1144 if (unlikely(!eth->scratch_ring)) in mtk_init_fq_dma()
1147 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); in mtk_init_fq_dma()
1148 if (unlikely(!eth->scratch_head)) in mtk_init_fq_dma()
1151 dma_addr = dma_map_single(eth->dma_dev, in mtk_init_fq_dma()
1152 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, in mtk_init_fq_dma()
1154 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) in mtk_init_fq_dma()
1157 phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1); in mtk_init_fq_dma()
1162 txd = eth->scratch_ring + i * soc->txrx.txd_size; in mtk_init_fq_dma()
1165 txd->txd2 = eth->phy_scratch_ring + in mtk_init_fq_dma()
1170 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_init_fq_dma()
1178 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head); in mtk_init_fq_dma()
1179 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail); in mtk_init_fq_dma()
1180 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count); in mtk_init_fq_dma()
1181 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen); in mtk_init_fq_dma()
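Note: lines 1157-1181 build the QDMA free-queue scratch ring: each descriptor's txd2 links to the DMA address of the next descriptor, and the last one is published as the ring tail. A sketch of the loop; the buffer-pointer and length fields between the matched lines are assumptions:

    phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);

    for (i = 0; i < cnt; i++) {
        struct mtk_tx_dma_v2 *txd;

        txd = eth->scratch_ring + i * soc->txrx.txd_size;
        txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;  /* buffer (assumed) */
        if (i < cnt - 1)
            txd->txd2 = eth->phy_scratch_ring +
                        (i + 1) * soc->txrx.txd_size;   /* next descriptor */
        txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);   /* length (assumed) */
    }

    mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
    mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
    mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
    mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);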
1210 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, in mtk_tx_unmap() argument
1213 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_tx_unmap()
1215 dma_unmap_single(eth->dma_dev, in mtk_tx_unmap()
1220 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1227 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1234 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1264 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, in setup_tx_buf() argument
1268 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in setup_tx_buf()
1291 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc_v1() local
1322 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc_v2() local
1331 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_tx_set_dma_desc_v2()
1359 if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev)) in mtk_tx_set_dma_desc_v2()
1377 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc() local
1379 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_tx_set_dma_desc()
1400 struct mtk_eth *eth = mac->hw; in mtk_tx_map() local
1401 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_map()
1418 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size, in mtk_tx_map()
1420 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) in mtk_tx_map()
1427 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size, in mtk_tx_map()
1460 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag, in mtk_tx_map()
1463 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) in mtk_tx_map()
1476 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, in mtk_tx_map()
1508 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); in mtk_tx_map()
1514 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0); in mtk_tx_map()
1524 mtk_tx_unmap(eth, tx_buf, NULL, false); in mtk_tx_map()
1537 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb) in mtk_cal_txd_req() argument
1546 eth->soc->txrx.dma_max_len); in mtk_cal_txd_req()
1555 static int mtk_queue_stopped(struct mtk_eth *eth) in mtk_queue_stopped() argument
1560 if (!eth->netdev[i]) in mtk_queue_stopped()
1562 if (netif_queue_stopped(eth->netdev[i])) in mtk_queue_stopped()
1569 static void mtk_wake_queue(struct mtk_eth *eth) in mtk_wake_queue() argument
1574 if (!eth->netdev[i]) in mtk_wake_queue()
1576 netif_tx_wake_all_queues(eth->netdev[i]); in mtk_wake_queue()
1583 struct mtk_eth *eth = mac->hw; in mtk_start_xmit() local
1584 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_start_xmit()
1593 spin_lock(&eth->page_lock); in mtk_start_xmit()
1595 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_start_xmit()
1598 tx_num = mtk_cal_txd_req(eth, skb); in mtk_start_xmit()
1601 netif_err(eth, tx_queued, dev, in mtk_start_xmit()
1603 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1610 netif_warn(eth, tx_err, dev, in mtk_start_xmit()
1628 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1633 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1639 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) in mtk_get_rx_ring() argument
1645 if (!eth->hwlro) in mtk_get_rx_ring()
1646 return &eth->rx_ring[0]; in mtk_get_rx_ring()
1651 ring = &eth->rx_ring[i]; in mtk_get_rx_ring()
1653 rxd = ring->dma + idx * eth->soc->txrx.rxd_size; in mtk_get_rx_ring()
1663 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth) in mtk_update_rx_cpu_idx() argument
1668 if (!eth->hwlro) { in mtk_update_rx_cpu_idx()
1669 ring = &eth->rx_ring[0]; in mtk_update_rx_cpu_idx()
1670 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1673 ring = &eth->rx_ring[i]; in mtk_update_rx_cpu_idx()
1676 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1682 static bool mtk_page_pool_enabled(struct mtk_eth *eth) in mtk_page_pool_enabled() argument
1684 return mtk_is_netsys_v2_or_greater(eth); in mtk_page_pool_enabled()
1687 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth, in mtk_create_page_pool() argument
1696 .dev = eth->dma_dev, in mtk_create_page_pool()
1703 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL in mtk_create_page_pool()
1709 err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id, in mtk_create_page_pool()
1710 eth->rx_napi.napi_id, PAGE_SIZE); in mtk_create_page_pool()
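Note: lines 1687-1710 pair a page_pool with an XDP rx queue; the DMA direction flips to bidirectional when an XDP program is attached (line 1703). A hedged sketch of the setup using the kernel page_pool/xdp_rxq_info API; the headroom and size macros are assumptions:

    struct page_pool_params pp_params = {
        .flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
        .pool_size = size,
        .dev       = eth->dma_dev,
        .offset    = MTK_PP_HEADROOM,       /* assumed */
        .max_len   = MTK_PP_MAX_BUF_SIZE,   /* assumed */
    };
    struct page_pool *pp;
    int err;

    /* XDP can rewrite and retransmit buffers, so map bidirectionally */
    pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
                                                      : DMA_FROM_DEVICE;
    pp = page_pool_create(&pp_params);
    if (IS_ERR(pp))
        return pp;

    err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
                             eth->rx_napi.napi_id, PAGE_SIZE);
    if (err < 0)
        goto err_free_pp;

    err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
    if (err)
        goto err_unregister_rxq;

    return pp;

    err_unregister_rxq:
        xdp_rxq_info_unreg(xdp_q);
    err_free_pp:
        page_pool_destroy(pp);
        return ERR_PTR(err);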
1750 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev, in mtk_xdp_frame_map() argument
1755 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_xdp_frame_map()
1760 txd_info->addr = dma_map_single(eth->dma_dev, data, in mtk_xdp_frame_map()
1762 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr))) in mtk_xdp_frame_map()
1771 dma_sync_single_for_device(eth->dma_dev, txd_info->addr, in mtk_xdp_frame_map()
1781 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size, in mtk_xdp_frame_map()
1787 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, in mtk_xdp_submit_frame() argument
1791 const struct mtk_soc_data *soc = eth->soc; in mtk_xdp_submit_frame()
1792 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_xdp_submit_frame()
1805 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_xdp_submit_frame()
1812 spin_lock(&eth->page_lock); in mtk_xdp_submit_frame()
1816 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1826 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf, in mtk_xdp_submit_frame()
1874 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); in mtk_xdp_submit_frame()
1879 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size), in mtk_xdp_submit_frame()
1883 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1890 mtk_tx_unmap(eth, tx_buf, NULL, false); in mtk_xdp_submit_frame()
1902 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1912 struct mtk_eth *eth = mac->hw; in mtk_xdp_xmit() local
1919 if (mtk_xdp_submit_frame(eth, frames[i], dev, true)) in mtk_xdp_xmit()
1932 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring, in mtk_xdp_run() argument
1943 prog = rcu_dereference(eth->prog); in mtk_xdp_run()
1963 if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) { in mtk_xdp_run()
1996 struct mtk_eth *eth) in mtk_poll_rx() argument
2015 ring = mtk_get_rx_ring(eth); in mtk_poll_rx()
2020 rxd = ring->dma + idx * eth->soc->txrx.rxd_size; in mtk_poll_rx()
2023 if (!mtk_rx_get_desc(eth, &trxd, rxd)) in mtk_poll_rx()
2027 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_poll_rx()
2041 } else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_poll_rx()
2047 !eth->netdev[mac])) in mtk_poll_rx()
2050 netdev = eth->netdev[mac]; in mtk_poll_rx()
2052 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_poll_rx()
2071 dma_sync_single_for_cpu(eth->dma_dev, in mtk_poll_rx()
2080 ret = mtk_xdp_run(eth, ring, &xdp, netdev); in mtk_poll_rx()
2109 dma_addr = dma_map_single(eth->dma_dev, in mtk_poll_rx()
2110 new_data + NET_SKB_PAD + eth->ip_align, in mtk_poll_rx()
2112 if (unlikely(dma_mapping_error(eth->dma_dev, in mtk_poll_rx()
2119 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_poll_rx()
2122 dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64), in mtk_poll_rx()
2139 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_poll_rx()
2155 if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid) in mtk_poll_rx()
2164 if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) && in mtk_poll_rx()
2168 if (port < ARRAY_SIZE(eth->dsa_meta) && in mtk_poll_rx()
2169 eth->dsa_meta[port]) in mtk_poll_rx()
2170 skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst); in mtk_poll_rx()
2174 mtk_ppe_check_skb(eth->ppe[0], skb, hash); in mtk_poll_rx()
2183 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { in mtk_poll_rx()
2191 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_poll_rx()
2206 mtk_update_rx_cpu_idx(eth); in mtk_poll_rx()
2209 eth->rx_packets += done; in mtk_poll_rx()
2210 eth->rx_bytes += bytes; in mtk_poll_rx()
2211 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes, in mtk_poll_rx()
2213 net_dim(&eth->rx_dim, dim_sample); in mtk_poll_rx()
2229 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac, in mtk_poll_tx_done() argument
2237 eth->tx_packets++; in mtk_poll_tx_done()
2238 eth->tx_bytes += bytes; in mtk_poll_tx_done()
2240 dev = eth->netdev[mac]; in mtk_poll_tx_done()
2259 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, in mtk_poll_tx_qdma() argument
2262 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_poll_tx_qdma()
2263 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx_qdma()
2270 dma = mtk_r32(eth, reg_map->qdma.drx_ptr); in mtk_poll_tx_qdma()
2283 eth->soc->txrx.txd_size); in mtk_poll_tx_qdma()
2289 mtk_poll_tx_done(eth, state, tx_buf->mac_id, in mtk_poll_tx_qdma()
2294 mtk_tx_unmap(eth, tx_buf, &bq, true); in mtk_poll_tx_qdma()
2304 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr); in mtk_poll_tx_qdma()
2309 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, in mtk_poll_tx_pdma() argument
2312 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx_pdma()
2319 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0); in mtk_poll_tx_pdma()
2329 mtk_poll_tx_done(eth, state, 0, tx_buf->data); in mtk_poll_tx_pdma()
2332 mtk_tx_unmap(eth, tx_buf, &bq, true); in mtk_poll_tx_pdma()
2334 desc = ring->dma + cpu * eth->soc->txrx.txd_size; in mtk_poll_tx_pdma()
2347 static int mtk_poll_tx(struct mtk_eth *eth, int budget) in mtk_poll_tx() argument
2349 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx()
2353 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_poll_tx()
2354 budget = mtk_poll_tx_qdma(eth, budget, &state); in mtk_poll_tx()
2356 budget = mtk_poll_tx_pdma(eth, budget, &state); in mtk_poll_tx()
2361 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes, in mtk_poll_tx()
2363 net_dim(&eth->tx_dim, dim_sample); in mtk_poll_tx()
2365 if (mtk_queue_stopped(eth) && in mtk_poll_tx()
2367 mtk_wake_queue(eth); in mtk_poll_tx()
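Note: lines 2361-2367 are the mtk_poll_tx epilogue: feed a DIM sample to the interrupt-moderation library, then wake stopped queues once descriptors are free again. A sketch; the free-descriptor threshold test is an assumption:

    struct dim_sample dim_sample = {};

    dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
                      &dim_sample);
    net_dim(&eth->tx_dim, dim_sample);

    if (mtk_queue_stopped(eth) &&
        (atomic_read(&ring->free_count) > ring->thresh))  /* assumed fields */
        mtk_wake_queue(eth);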
2372 static void mtk_handle_status_irq(struct mtk_eth *eth) in mtk_handle_status_irq() argument
2374 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2); in mtk_handle_status_irq()
2377 mtk_stats_update(eth); in mtk_handle_status_irq()
2378 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF), in mtk_handle_status_irq()
2385 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); in mtk_napi_tx() local
2386 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_napi_tx()
2389 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_napi_tx()
2390 mtk_handle_status_irq(eth); in mtk_napi_tx()
2391 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status); in mtk_napi_tx()
2392 tx_done = mtk_poll_tx(eth, budget); in mtk_napi_tx()
2394 if (unlikely(netif_msg_intr(eth))) { in mtk_napi_tx()
2395 dev_info(eth->dev, in mtk_napi_tx()
2397 mtk_r32(eth, reg_map->tx_irq_status), in mtk_napi_tx()
2398 mtk_r32(eth, reg_map->tx_irq_mask)); in mtk_napi_tx()
2404 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) in mtk_napi_tx()
2408 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_napi_tx()
2415 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); in mtk_napi_rx() local
2416 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_napi_rx()
2419 mtk_handle_status_irq(eth); in mtk_napi_rx()
2424 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, in mtk_napi_rx()
2426 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); in mtk_napi_rx()
2429 if (unlikely(netif_msg_intr(eth))) { in mtk_napi_rx()
2430 dev_info(eth->dev, in mtk_napi_rx()
2432 mtk_r32(eth, reg_map->pdma.irq_status), in mtk_napi_rx()
2433 mtk_r32(eth, reg_map->pdma.irq_mask)); in mtk_napi_rx()
2439 } while (mtk_r32(eth, reg_map->pdma.irq_status) & in mtk_napi_rx()
2440 eth->soc->txrx.rx_irq_done_mask); in mtk_napi_rx()
2443 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_napi_rx()
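Note: lines 2415-2443 show mtk_napi_rx acking the RX-done status, polling until the budget is spent or the status stops re-asserting, then re-enabling the interrupt. A sketch of that control flow; the napi_complete_done() placement is an assumption:

    do {
        mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
                reg_map->pdma.irq_status);      /* ack */
        rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
        rx_done_total += rx_done;

        if (rx_done_total == budget)
            return budget;
    } while (mtk_r32(eth, reg_map->pdma.irq_status) &
             eth->soc->txrx.rx_irq_done_mask);

    if (napi_complete_done(napi, rx_done_total))
        mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);

    return rx_done_total;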
2448 static int mtk_tx_alloc(struct mtk_eth *eth) in mtk_tx_alloc() argument
2450 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_alloc()
2451 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_tx_alloc()
2468 ring->dma = eth->sram_base + ring_size * sz; in mtk_tx_alloc()
2469 ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz; in mtk_tx_alloc()
2471 ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, in mtk_tx_alloc()
2486 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_tx_alloc()
2499 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, in mtk_tx_alloc()
2523 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr); in mtk_tx_alloc()
2524 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr); in mtk_tx_alloc()
2525 mtk_w32(eth, in mtk_tx_alloc()
2528 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr); in mtk_tx_alloc()
2532 mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs); in mtk_tx_alloc()
2539 if (mtk_is_netsys_v1(eth)) in mtk_tx_alloc()
2541 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); in mtk_tx_alloc()
2545 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate); in mtk_tx_alloc()
2546 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_tx_alloc()
2547 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4); in mtk_tx_alloc()
2549 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); in mtk_tx_alloc()
2550 mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0); in mtk_tx_alloc()
2551 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0); in mtk_tx_alloc()
2552 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx); in mtk_tx_alloc()
2561 static void mtk_tx_clean(struct mtk_eth *eth) in mtk_tx_clean() argument
2563 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_clean()
2564 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_tx_clean()
2569 mtk_tx_unmap(eth, &ring->buf[i], NULL, false); in mtk_tx_clean()
2574 dma_free_coherent(eth->dma_dev, in mtk_tx_clean()
2581 dma_free_coherent(eth->dma_dev, in mtk_tx_clean()
2588 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) in mtk_rx_alloc() argument
2590 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_rx_alloc()
2595 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_rx_alloc()
2603 ring = &eth->rx_ring_qdma; in mtk_rx_alloc()
2605 ring = &eth->rx_ring[ring_no]; in mtk_rx_alloc()
2623 if (mtk_page_pool_enabled(eth)) { in mtk_rx_alloc()
2626 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no, in mtk_rx_alloc()
2634 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) || in mtk_rx_alloc()
2636 ring->dma = dma_alloc_coherent(eth->dma_dev, in mtk_rx_alloc()
2637 rx_dma_size * eth->soc->txrx.rxd_size, in mtk_rx_alloc()
2640 struct mtk_tx_ring *tx_ring = &eth->tx_ring; in mtk_rx_alloc()
2643 eth->soc->txrx.txd_size * (ring_no + 1); in mtk_rx_alloc()
2645 eth->soc->txrx.txd_size * (ring_no + 1); in mtk_rx_alloc()
2656 rxd = ring->dma + i * eth->soc->txrx.rxd_size; in mtk_rx_alloc()
2671 dma_addr = dma_map_single(eth->dma_dev, in mtk_rx_alloc()
2672 data + NET_SKB_PAD + eth->ip_align, in mtk_rx_alloc()
2674 if (unlikely(dma_mapping_error(eth->dma_dev, in mtk_rx_alloc()
2683 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_rx_alloc()
2688 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_rx_alloc()
2693 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_rx_alloc()
2716 mtk_w32(eth, ring->phys, in mtk_rx_alloc()
2718 mtk_w32(eth, rx_dma_size, in mtk_rx_alloc()
2720 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), in mtk_rx_alloc()
2723 mtk_w32(eth, ring->phys, in mtk_rx_alloc()
2725 mtk_w32(eth, rx_dma_size, in mtk_rx_alloc()
2727 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), in mtk_rx_alloc()
2730 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_rx_alloc()
2735 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram) in mtk_rx_clean() argument
2747 rxd = ring->dma + i * eth->soc->txrx.rxd_size; in mtk_rx_clean()
2751 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_rx_clean()
2754 dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64), in mtk_rx_clean()
2763 dma_free_coherent(eth->dma_dev, in mtk_rx_clean()
2764 ring->dma_size * eth->soc->txrx.rxd_size, in mtk_rx_clean()
2777 static int mtk_hwlro_rx_init(struct mtk_eth *eth) in mtk_hwlro_rx_init() argument
2801 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i)); in mtk_hwlro_rx_init()
2802 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i)); in mtk_hwlro_rx_init()
2803 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i)); in mtk_hwlro_rx_init()
2813 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2); in mtk_hwlro_rx_init()
2816 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA); in mtk_hwlro_rx_init()
2819 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME, in mtk_hwlro_rx_init()
2831 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3); in mtk_hwlro_rx_init()
2832 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_init()
2837 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth) in mtk_hwlro_rx_uninit() argument
2843 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2847 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2857 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i)); in mtk_hwlro_rx_uninit()
2860 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2863 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip) in mtk_hwlro_val_ipaddr() argument
2867 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2870 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2872 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx)); in mtk_hwlro_val_ipaddr()
2875 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2878 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx) in mtk_hwlro_inval_ipaddr() argument
2882 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_inval_ipaddr()
2885 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_inval_ipaddr()
2887 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx)); in mtk_hwlro_inval_ipaddr()
2909 struct mtk_eth *eth = mac->hw; in mtk_hwlro_add_ipaddr() local
2922 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]); in mtk_hwlro_add_ipaddr()
2933 struct mtk_eth *eth = mac->hw; in mtk_hwlro_del_ipaddr() local
2944 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); in mtk_hwlro_del_ipaddr()
2952 struct mtk_eth *eth = mac->hw; in mtk_hwlro_netdev_disable() local
2959 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); in mtk_hwlro_netdev_disable()
3043 static int mtk_dma_busy_wait(struct mtk_eth *eth) in mtk_dma_busy_wait() argument
3049 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dma_busy_wait()
3050 reg = eth->soc->reg_map->qdma.glo_cfg; in mtk_dma_busy_wait()
3052 reg = eth->soc->reg_map->pdma.glo_cfg; in mtk_dma_busy_wait()
3054 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val, in mtk_dma_busy_wait()
3058 dev_err(eth->dev, "DMA init timeout\n"); in mtk_dma_busy_wait()
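Note: line 3054 polls the selected engine's global-config register with readx_poll_timeout_atomic() from <linux/iopoll.h>; a sketch of the call, with the busy-bit mask and timeout constants assumed:

    ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
                                    !(val & (MTK_RX_DMA_BUSY |
                                             MTK_TX_DMA_BUSY)),
                                    5, MTK_DMA_BUSY_TIMEOUT_US);
    if (ret)
        dev_err(eth->dev, "DMA init timeout\n");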
3063 static int mtk_dma_init(struct mtk_eth *eth) in mtk_dma_init() argument
3068 if (mtk_dma_busy_wait(eth)) in mtk_dma_init()
3071 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3075 err = mtk_init_fq_dma(eth); in mtk_dma_init()
3080 err = mtk_tx_alloc(eth); in mtk_dma_init()
3084 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3085 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); in mtk_dma_init()
3090 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); in mtk_dma_init()
3094 if (eth->hwlro) { in mtk_dma_init()
3096 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO); in mtk_dma_init()
3100 err = mtk_hwlro_rx_init(eth); in mtk_dma_init()
3105 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3109 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | in mtk_dma_init()
3110 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th); in mtk_dma_init()
3111 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred); in mtk_dma_init()
3117 static void mtk_dma_free(struct mtk_eth *eth) in mtk_dma_free() argument
3119 const struct mtk_soc_data *soc = eth->soc; in mtk_dma_free()
3122 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dma_free()
3126 if (!eth->netdev[i]) in mtk_dma_free()
3130 netdev_tx_reset_subqueue(eth->netdev[i], j); in mtk_dma_free()
3133 if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) { in mtk_dma_free()
3134 dma_free_coherent(eth->dma_dev, in mtk_dma_free()
3136 eth->scratch_ring, eth->phy_scratch_ring); in mtk_dma_free()
3137 eth->scratch_ring = NULL; in mtk_dma_free()
3138 eth->phy_scratch_ring = 0; in mtk_dma_free()
3140 mtk_tx_clean(eth); in mtk_dma_free()
3141 mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM)); in mtk_dma_free()
3142 mtk_rx_clean(eth, &eth->rx_ring_qdma, false); in mtk_dma_free()
3144 if (eth->hwlro) { in mtk_dma_free()
3145 mtk_hwlro_rx_uninit(eth); in mtk_dma_free()
3147 mtk_rx_clean(eth, &eth->rx_ring[i], false); in mtk_dma_free()
3150 kfree(eth->scratch_head); in mtk_dma_free()
3153 static bool mtk_hw_reset_check(struct mtk_eth *eth) in mtk_hw_reset_check() argument
3155 u32 val = mtk_r32(eth, MTK_INT_STATUS2); in mtk_hw_reset_check()
3165 struct mtk_eth *eth = mac->hw; in mtk_tx_timeout() local
3167 if (test_bit(MTK_RESETTING, &eth->state)) in mtk_tx_timeout()
3170 if (!mtk_hw_reset_check(eth)) in mtk_tx_timeout()
3173 eth->netdev[mac->id]->stats.tx_errors++; in mtk_tx_timeout()
3174 netif_err(eth, tx_err, dev, "transmit timed out\n"); in mtk_tx_timeout()
3176 schedule_work(&eth->pending_work); in mtk_tx_timeout()
3181 struct mtk_eth *eth = _eth; in mtk_handle_irq_rx() local
3183 eth->rx_events++; in mtk_handle_irq_rx()
3184 if (likely(napi_schedule_prep(&eth->rx_napi))) { in mtk_handle_irq_rx()
3185 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_handle_irq_rx()
3186 __napi_schedule(&eth->rx_napi); in mtk_handle_irq_rx()
3194 struct mtk_eth *eth = _eth; in mtk_handle_irq_tx() local
3196 eth->tx_events++; in mtk_handle_irq_tx()
3197 if (likely(napi_schedule_prep(&eth->tx_napi))) { in mtk_handle_irq_tx()
3198 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_handle_irq_tx()
3199 __napi_schedule(&eth->tx_napi); in mtk_handle_irq_tx()
3207 struct mtk_eth *eth = _eth; in mtk_handle_irq() local
3208 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_handle_irq()
3210 if (mtk_r32(eth, reg_map->pdma.irq_mask) & in mtk_handle_irq()
3211 eth->soc->txrx.rx_irq_done_mask) { in mtk_handle_irq()
3212 if (mtk_r32(eth, reg_map->pdma.irq_status) & in mtk_handle_irq()
3213 eth->soc->txrx.rx_irq_done_mask) in mtk_handle_irq()
3216 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) { in mtk_handle_irq()
3217 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) in mtk_handle_irq()
3228 struct mtk_eth *eth = mac->hw; in mtk_poll_controller() local
3230 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_poll_controller()
3231 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_poll_controller()
3232 mtk_handle_irq_rx(eth->irq[2], dev); in mtk_poll_controller()
3233 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_poll_controller()
3234 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_poll_controller()
3238 static int mtk_start_dma(struct mtk_eth *eth) in mtk_start_dma() argument
3241 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_start_dma()
3244 err = mtk_dma_init(eth); in mtk_start_dma()
3246 mtk_dma_free(eth); in mtk_start_dma()
3250 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_start_dma()
3251 val = mtk_r32(eth, reg_map->qdma.glo_cfg); in mtk_start_dma()
3256 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_start_dma()
3262 mtk_w32(eth, val, reg_map->qdma.glo_cfg); in mtk_start_dma()
3264 mtk_w32(eth, in mtk_start_dma()
3269 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN | in mtk_start_dma()
3277 static void mtk_gdm_config(struct mtk_eth *eth, u32 config) in mtk_gdm_config() argument
3281 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_gdm_config()
3287 if (!eth->netdev[i]) in mtk_gdm_config()
3290 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); in mtk_gdm_config()
3300 if (netdev_uses_dsa(eth->netdev[i])) in mtk_gdm_config()
3303 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); in mtk_gdm_config()
3306 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); in mtk_gdm_config()
3307 mtk_w32(eth, 0, MTK_RST_GL); in mtk_gdm_config()
3324 struct mtk_eth *eth = mac->hw; in mtk_device_event() local
3358 mtk_set_queue_speed(eth, dp->index + 3, s.base.speed); in mtk_device_event()
3366 struct mtk_eth *eth = mac->hw; in mtk_open() local
3377 if (!refcount_read(&eth->dma_refcnt)) { in mtk_open()
3378 const struct mtk_soc_data *soc = eth->soc; in mtk_open()
3382 err = mtk_start_dma(eth); in mtk_open()
3388 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_open()
3389 mtk_ppe_start(eth->ppe[i]); in mtk_open()
3393 mtk_gdm_config(eth, gdm_config); in mtk_open()
3395 napi_enable(&eth->tx_napi); in mtk_open()
3396 napi_enable(&eth->rx_napi); in mtk_open()
3397 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_open()
3398 mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask); in mtk_open()
3399 refcount_set(&eth->dma_refcnt, 1); in mtk_open()
3402 refcount_inc(&eth->dma_refcnt); in mtk_open()
3407 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_open()
3410 if (mtk_uses_dsa(dev) && !eth->prog) { in mtk_open()
3411 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { in mtk_open()
3412 struct metadata_dst *md_dst = eth->dsa_meta[i]; in mtk_open()
3423 eth->dsa_meta[i] = md_dst; in mtk_open()
3429 u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); in mtk_open()
3432 mtk_w32(eth, val, MTK_CDMP_IG_CTRL); in mtk_open()
3434 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL); in mtk_open()
3440 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) in mtk_stop_dma() argument
3446 spin_lock_bh(&eth->page_lock); in mtk_stop_dma()
3447 val = mtk_r32(eth, glo_cfg); in mtk_stop_dma()
3448 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), in mtk_stop_dma()
3450 spin_unlock_bh(&eth->page_lock); in mtk_stop_dma()
3454 val = mtk_r32(eth, glo_cfg); in mtk_stop_dma()
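Note: lines 3446-3454 clear the DMA-enable bits under page_lock, then re-read glo_cfg, presumably to wait for in-flight DMA to drain. A sketch; the drain-loop bound and sleep are guesses:

    spin_lock_bh(&eth->page_lock);
    val = mtk_r32(eth, glo_cfg);
    mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
            glo_cfg);
    spin_unlock_bh(&eth->page_lock);

    /* wait for the engine to go idle (bound and delay assumed) */
    for (i = 0; i < 10; i++) {
        val = mtk_r32(eth, glo_cfg);
        if (!(val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)))
            break;
        msleep(20);
    }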
3466 struct mtk_eth *eth = mac->hw; in mtk_stop() local
3476 if (!refcount_dec_and_test(&eth->dma_refcnt)) in mtk_stop()
3479 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); in mtk_stop()
3481 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_stop()
3482 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_stop()
3483 napi_disable(&eth->tx_napi); in mtk_stop()
3484 napi_disable(&eth->rx_napi); in mtk_stop()
3486 cancel_work_sync(&eth->rx_dim.work); in mtk_stop()
3487 cancel_work_sync(&eth->tx_dim.work); in mtk_stop()
3489 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_stop()
3490 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg); in mtk_stop()
3491 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg); in mtk_stop()
3493 mtk_dma_free(eth); in mtk_stop()
3495 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_stop()
3496 mtk_ppe_stop(eth->ppe[i]); in mtk_stop()
3505 struct mtk_eth *eth = mac->hw; in mtk_xdp_setup() local
3509 if (eth->hwlro) { in mtk_xdp_setup()
3519 need_update = !!eth->prog != !!prog; in mtk_xdp_setup()
3523 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held()); in mtk_xdp_setup()
3543 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) in ethsys_reset() argument
3545 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
3550 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
3556 static void mtk_clk_disable(struct mtk_eth *eth) in mtk_clk_disable() argument
3561 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_disable()
3564 static int mtk_clk_enable(struct mtk_eth *eth) in mtk_clk_enable() argument
3569 ret = clk_prepare_enable(eth->clks[clk]); in mtk_clk_enable()
3578 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_enable()
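Note: lines 3564-3578 are the usual clk_prepare_enable() loop with rollback on failure; a sketch, assuming an MTK_CLK_MAX-sized eth->clks[] array:

    static int mtk_clk_enable(struct mtk_eth *eth)
    {
        int clk, ret;

        for (clk = 0; clk < MTK_CLK_MAX; clk++) {
            ret = clk_prepare_enable(eth->clks[clk]);
            if (ret)
                goto err_disable_clks;
        }

        return 0;

    err_disable_clks:
        /* unwind everything enabled so far, in reverse order */
        while (--clk >= 0)
            clk_disable_unprepare(eth->clks[clk]);

        return ret;
    }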
3586 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim); in mtk_dim_rx() local
3587 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_dim_rx()
3591 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode, in mtk_dim_rx()
3593 spin_lock_bh(&eth->dim_lock); in mtk_dim_rx()
3595 val = mtk_r32(eth, reg_map->pdma.delay_irq); in mtk_dim_rx()
3605 mtk_w32(eth, val, reg_map->pdma.delay_irq); in mtk_dim_rx()
3606 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_rx()
3607 mtk_w32(eth, val, reg_map->qdma.delay_irq); in mtk_dim_rx()
3609 spin_unlock_bh(&eth->dim_lock); in mtk_dim_rx()
3617 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim); in mtk_dim_tx() local
3618 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_dim_tx()
3622 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode, in mtk_dim_tx()
3624 spin_lock_bh(&eth->dim_lock); in mtk_dim_tx()
3626 val = mtk_r32(eth, reg_map->pdma.delay_irq); in mtk_dim_tx()
3636 mtk_w32(eth, val, reg_map->pdma.delay_irq); in mtk_dim_tx()
3637 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_tx()
3638 mtk_w32(eth, val, reg_map->qdma.delay_irq); in mtk_dim_tx()
3640 spin_unlock_bh(&eth->dim_lock); in mtk_dim_tx()
3647 struct mtk_eth *eth = mac->hw; in mtk_set_mcr_max_rx() local
3650 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_set_mcr_max_rx()
3669 static void mtk_hw_reset(struct mtk_eth *eth) in mtk_hw_reset() argument
3673 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_hw_reset()
3674 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); in mtk_hw_reset()
3676 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_reset()
3679 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_reset()
3682 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_hw_reset()
3686 } else if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_hw_reset()
3689 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_reset()
3695 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val); in mtk_hw_reset()
3697 if (mtk_is_netsys_v3_or_greater(eth)) in mtk_hw_reset()
3698 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, in mtk_hw_reset()
3700 else if (mtk_is_netsys_v2_or_greater(eth)) in mtk_hw_reset()
3701 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, in mtk_hw_reset()
3705 static u32 mtk_hw_reset_read(struct mtk_eth *eth) in mtk_hw_reset_read() argument
3709 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val); in mtk_hw_reset_read()
3713 static void mtk_hw_warm_reset(struct mtk_eth *eth) in mtk_hw_warm_reset() argument
3717 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE, in mtk_hw_warm_reset()
3719 if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val, in mtk_hw_warm_reset()
3721 dev_err(eth->dev, "warm reset failed\n"); in mtk_hw_warm_reset()
3722 mtk_hw_reset(eth); in mtk_hw_warm_reset()
3726 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_warm_reset()
3728 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_warm_reset()
3730 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_hw_warm_reset()
3734 } else if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_hw_warm_reset()
3736 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_warm_reset()
3742 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask); in mtk_hw_warm_reset()
3745 val = mtk_hw_reset_read(eth); in mtk_hw_warm_reset()
3747 dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n", in mtk_hw_warm_reset()
3751 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask); in mtk_hw_warm_reset()
3754 val = mtk_hw_reset_read(eth); in mtk_hw_warm_reset()
3756 dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n", in mtk_hw_warm_reset()
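Note: lines 3713-3722 arm the FE warm-reset bit and poll it with readx_poll_timeout_atomic(), falling back to a full mtk_hw_reset() on timeout. A sketch of that first stage; the poll interval and timeout values are assumptions:

    regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE, RSTCTRL_FE);
    if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
                                  val & RSTCTRL_FE, 1, 1000)) {
        dev_err(eth->dev, "warm reset failed\n");
        mtk_hw_reset(eth);      /* cold-reset fallback */
        return;
    }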
3760 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth) in mtk_hw_check_dma_hang() argument
3762 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_hw_check_dma_hang()
3770 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_hw_check_dma_hang()
3774 wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc); in mtk_hw_check_dma_hang()
3776 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204); in mtk_hw_check_dma_hang()
3779 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230); in mtk_hw_check_dma_hang()
3782 oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) && in mtk_hw_check_dma_hang()
3783 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) && in mtk_hw_check_dma_hang()
3784 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16))); in mtk_hw_check_dma_hang()
3786 if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) { in mtk_hw_check_dma_hang()
3787 if (++eth->reset.wdma_hang_count > 2) { in mtk_hw_check_dma_hang()
3788 eth->reset.wdma_hang_count = 0; in mtk_hw_check_dma_hang()
3795 qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234); in mtk_hw_check_dma_hang()
3796 qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308); in mtk_hw_check_dma_hang()
3798 gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0; in mtk_hw_check_dma_hang()
3799 gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0; in mtk_hw_check_dma_hang()
3800 gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1; in mtk_hw_check_dma_hang()
3801 gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1; in mtk_hw_check_dma_hang()
3802 gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24); in mtk_hw_check_dma_hang()
3803 gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64); in mtk_hw_check_dma_hang()
3808 if (++eth->reset.qdma_hang_count > 2) { in mtk_hw_check_dma_hang()
3809 eth->reset.qdma_hang_count = 0; in mtk_hw_check_dma_hang()
3816 oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0)); in mtk_hw_check_dma_hang()
3817 cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16)); in mtk_hw_check_dma_hang()
3818 adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) && in mtk_hw_check_dma_hang()
3819 !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6)); in mtk_hw_check_dma_hang()
3822 if (++eth->reset.adma_hang_count > 2) { in mtk_hw_check_dma_hang()
3823 eth->reset.adma_hang_count = 0; in mtk_hw_check_dma_hang()
3829 eth->reset.wdma_hang_count = 0; in mtk_hw_check_dma_hang()
3830 eth->reset.qdma_hang_count = 0; in mtk_hw_check_dma_hang()
3831 eth->reset.adma_hang_count = 0; in mtk_hw_check_dma_hang()
3833 eth->reset.wdidx = wdidx; in mtk_hw_check_dma_hang()
3841 struct mtk_eth *eth = container_of(del_work, struct mtk_eth, in mtk_hw_reset_monitor_work() local
3844 if (test_bit(MTK_RESETTING, &eth->state)) in mtk_hw_reset_monitor_work()
3848 if (mtk_hw_check_dma_hang(eth)) in mtk_hw_reset_monitor_work()
3849 schedule_work(&eth->pending_work); in mtk_hw_reset_monitor_work()
3852 schedule_delayed_work(&eth->reset.monitor_work, in mtk_hw_reset_monitor_work()
3856 static int mtk_hw_init(struct mtk_eth *eth, bool reset) in mtk_hw_init() argument
3860 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_hw_init()
3863 if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state)) in mtk_hw_init()
3867 pm_runtime_enable(eth->dev); in mtk_hw_init()
3868 pm_runtime_get_sync(eth->dev); in mtk_hw_init()
3870 ret = mtk_clk_enable(eth); in mtk_hw_init()
3875 if (eth->ethsys) in mtk_hw_init()
3876 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, in mtk_hw_init()
3877 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask); in mtk_hw_init()
3879 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_hw_init()
3880 ret = device_reset(eth->dev); in mtk_hw_init()
3882 dev_err(eth->dev, "MAC reset failed!\n"); in mtk_hw_init()
3887 mtk_dim_rx(&eth->rx_dim.work); in mtk_hw_init()
3888 mtk_dim_tx(&eth->tx_dim.work); in mtk_hw_init()
3891 mtk_tx_irq_disable(eth, ~0); in mtk_hw_init()
3892 mtk_rx_irq_disable(eth, ~0); in mtk_hw_init()
3900 mtk_hw_warm_reset(eth); in mtk_hw_init()
3902 mtk_hw_reset(eth); in mtk_hw_init()
3904 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_hw_init()
3906 val = mtk_r32(eth, MTK_FE_GLO_MISC); in mtk_hw_init()
3907 mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC); in mtk_hw_init()
3910 if (eth->pctl) { in mtk_hw_init()
3912 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); in mtk_hw_init()
3915 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); in mtk_hw_init()
3918 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); in mtk_hw_init()
3926 struct net_device *dev = eth->netdev[i]; in mtk_hw_init()
3931 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); in mtk_hw_init()
3939 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); in mtk_hw_init()
3940 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); in mtk_hw_init()
3941 if (mtk_is_netsys_v1(eth)) { in mtk_hw_init()
3942 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); in mtk_hw_init()
3943 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL); in mtk_hw_init()
3945 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); in mtk_hw_init()
3949 mtk_dim_rx(&eth->rx_dim.work); in mtk_hw_init()
3950 mtk_dim_tx(&eth->tx_dim.work); in mtk_hw_init()
3953 mtk_tx_irq_disable(eth, ~0); in mtk_hw_init()
3954 mtk_rx_irq_disable(eth, ~0); in mtk_hw_init()
3957 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp); in mtk_hw_init()
3958 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4); in mtk_hw_init()
3959 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp); in mtk_hw_init()
3960 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4); in mtk_hw_init()
3961 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); in mtk_hw_init()
3963 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_init()
3965 mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) | in mtk_hw_init()
3969 mtk_w32(eth, 0x00600009, PSE_IQ_REV(8)); in mtk_hw_init()
3974 mtk_w32(eth, 0x00002300, PSE_DROP_CFG); in mtk_hw_init()
3979 mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0)); in mtk_hw_init()
3980 mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1)); in mtk_hw_init()
3981 mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2)); in mtk_hw_init()
3984 mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES); in mtk_hw_init()
3985 mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES); in mtk_hw_init()
3988 mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0)); in mtk_hw_init()
3995 mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i); in mtk_hw_init()
3996 } else if (!mtk_is_netsys_v1(eth)) { in mtk_hw_init()
3998 mtk_w32(eth, 0x00000300, PSE_DROP_CFG); in mtk_hw_init()
4001 mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0)); in mtk_hw_init()
4004 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2); in mtk_hw_init()
4007 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1)); in mtk_hw_init()
4008 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2)); in mtk_hw_init()
4009 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3)); in mtk_hw_init()
4010 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4)); in mtk_hw_init()
4011 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5)); in mtk_hw_init()
4012 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6)); in mtk_hw_init()
4013 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7)); in mtk_hw_init()
4014 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8)); in mtk_hw_init()
4017 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1)); in mtk_hw_init()
4018 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2)); in mtk_hw_init()
4019 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3)); in mtk_hw_init()
4020 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4)); in mtk_hw_init()
4021 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5)); in mtk_hw_init()
4022 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6)); in mtk_hw_init()
4023 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7)); in mtk_hw_init()
4024 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8)); in mtk_hw_init()
4027 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES); in mtk_hw_init()
4028 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES); in mtk_hw_init()
4029 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES); in mtk_hw_init()
4030 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES); in mtk_hw_init()
4031 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES); in mtk_hw_init()
4032 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES); in mtk_hw_init()
4039 pm_runtime_put_sync(eth->dev); in mtk_hw_init()
4040 pm_runtime_disable(eth->dev); in mtk_hw_init()
4046 static int mtk_hw_deinit(struct mtk_eth *eth) in mtk_hw_deinit() argument
4048 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state)) in mtk_hw_deinit()
4051 mtk_clk_disable(eth); in mtk_hw_deinit()
4053 pm_runtime_put_sync(eth->dev); in mtk_hw_deinit()
4054 pm_runtime_disable(eth->dev); in mtk_hw_deinit()
4062 struct mtk_eth *eth = mac->hw; in mtk_uninit() local
4065 mtk_tx_irq_disable(eth, ~0); in mtk_uninit()
4066 mtk_rx_irq_disable(eth, ~0); in mtk_uninit()
4073 struct mtk_eth *eth = mac->hw; in mtk_change_mtu() local
4075 if (rcu_access_pointer(eth->prog) && in mtk_change_mtu()
4103 static void mtk_prepare_for_reset(struct mtk_eth *eth) in mtk_prepare_for_reset() argument
4110 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID); in mtk_prepare_for_reset()
4112 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT); in mtk_prepare_for_reset()
4113 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_prepare_for_reset()
4115 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_prepare_for_reset()
4117 mtk_w32(eth, val, MTK_FE_GLO_CFG(i)); in mtk_prepare_for_reset()
4121 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_prepare_for_reset()
4122 mtk_ppe_prepare_reset(eth->ppe[i]); in mtk_prepare_for_reset()
4125 mtk_w32(eth, 0, MTK_FE_INT_ENABLE); in mtk_prepare_for_reset()
4129 val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK; in mtk_prepare_for_reset()
4130 mtk_w32(eth, val, MTK_MAC_MCR(i)); in mtk_prepare_for_reset()
4136 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); in mtk_pending_work() local
4142 set_bit(MTK_RESETTING, &eth->state); in mtk_pending_work()
4144 mtk_prepare_for_reset(eth); in mtk_pending_work()
4149 mtk_prepare_for_reset(eth); in mtk_pending_work()
4153 if (!eth->netdev[i] || !netif_running(eth->netdev[i])) in mtk_pending_work()
4156 mtk_stop(eth->netdev[i]); in mtk_pending_work()
4162 if (eth->dev->pins) in mtk_pending_work()
4163 pinctrl_select_state(eth->dev->pins->p, in mtk_pending_work()
4164 eth->dev->pins->default_state); in mtk_pending_work()
4165 mtk_hw_init(eth, true); in mtk_pending_work()
4169 if (!eth->netdev[i] || !test_bit(i, &restart)) in mtk_pending_work()
4172 if (mtk_open(eth->netdev[i])) { in mtk_pending_work()
4173 netif_alert(eth, ifup, eth->netdev[i], in mtk_pending_work()
4175 dev_close(eth->netdev[i]); in mtk_pending_work()
4181 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID); in mtk_pending_work()
4183 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT); in mtk_pending_work()
4184 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_pending_work()
4186 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_pending_work()
4189 mtk_w32(eth, val, MTK_FE_GLO_CFG(i)); in mtk_pending_work()
4192 clear_bit(MTK_RESETTING, &eth->state); in mtk_pending_work()
4199 static int mtk_free_dev(struct mtk_eth *eth) in mtk_free_dev() argument
4204 if (!eth->netdev[i]) in mtk_free_dev()
4206 free_netdev(eth->netdev[i]); in mtk_free_dev()
4209 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { in mtk_free_dev()
4210 if (!eth->dsa_meta[i]) in mtk_free_dev()
4212 metadata_dst_free(eth->dsa_meta[i]); in mtk_free_dev()
4218 static int mtk_unreg_dev(struct mtk_eth *eth) in mtk_unreg_dev() argument
4224 if (!eth->netdev[i]) in mtk_unreg_dev()
4226 mac = netdev_priv(eth->netdev[i]); in mtk_unreg_dev()
4227 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_unreg_dev()
4229 unregister_netdev(eth->netdev[i]); in mtk_unreg_dev()
4235 static void mtk_sgmii_destroy(struct mtk_eth *eth) in mtk_sgmii_destroy() argument
4240 mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]); in mtk_sgmii_destroy()
4243 static int mtk_cleanup(struct mtk_eth *eth) in mtk_cleanup() argument
4245 mtk_sgmii_destroy(eth); in mtk_cleanup()
4246 mtk_unreg_dev(eth); in mtk_cleanup()
4247 mtk_free_dev(eth); in mtk_cleanup()
4248 cancel_work_sync(&eth->pending_work); in mtk_cleanup()
4249 cancel_delayed_work_sync(&eth->reset.monitor_work); in mtk_cleanup()
4350 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data) in mtk_ethtool_pp_stats() argument
4355 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) { in mtk_ethtool_pp_stats()
4356 struct mtk_rx_ring *ring = &eth->rx_ring[i]; in mtk_ethtool_pp_stats()
4508 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) in mtk_add_mac() argument
4519 dev_err(eth->dev, "missing mac id\n"); in mtk_add_mac()
4525 dev_err(eth->dev, "%d is not a valid mac id\n", id); in mtk_add_mac()
4529 if (eth->netdev[id]) { in mtk_add_mac()
4530 dev_err(eth->dev, "duplicate mac id found: %d\n", id); in mtk_add_mac()
4534 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_add_mac()
4537 eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1); in mtk_add_mac()
4538 if (!eth->netdev[id]) { in mtk_add_mac()
4539 dev_err(eth->dev, "alloc_etherdev failed\n"); in mtk_add_mac()
4542 mac = netdev_priv(eth->netdev[id]); in mtk_add_mac()
4543 eth->mac[id] = mac; in mtk_add_mac()
4545 mac->hw = eth; in mtk_add_mac()
4548 err = of_get_ethdev_address(mac->of_node, eth->netdev[id]); in mtk_add_mac()
4554 eth_hw_addr_random(eth->netdev[id]); in mtk_add_mac()
4555 dev_err(eth->dev, "generated random MAC address %pM\n", in mtk_add_mac()
4556 eth->netdev[id]->dev_addr); in mtk_add_mac()
4562 mac->hw_stats = devm_kzalloc(eth->dev, in mtk_add_mac()
4566 dev_err(eth->dev, "failed to allocate counter memory\n"); in mtk_add_mac()
4573 if (mtk_is_netsys_v3_or_greater(eth)) in mtk_add_mac()
4581 dev_err(eth->dev, "incorrect phy-mode\n"); in mtk_add_mac()
4589 mac->phylink_config.dev = &eth->netdev[id]->dev; in mtk_add_mac()
4615 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val); in mtk_add_mac()
4651 SET_NETDEV_DEV(eth->netdev[id], eth->dev); in mtk_add_mac()
4652 eth->netdev[id]->watchdog_timeo = 5 * HZ; in mtk_add_mac()
4653 eth->netdev[id]->netdev_ops = &mtk_netdev_ops; in mtk_add_mac()
4654 eth->netdev[id]->base_addr = (unsigned long)eth->base; in mtk_add_mac()
4656 eth->netdev[id]->hw_features = eth->soc->hw_features; in mtk_add_mac()
4657 if (eth->hwlro) in mtk_add_mac()
4658 eth->netdev[id]->hw_features |= NETIF_F_LRO; in mtk_add_mac()
4660 eth->netdev[id]->vlan_features = eth->soc->hw_features & in mtk_add_mac()
4662 eth->netdev[id]->features |= eth->soc->hw_features; in mtk_add_mac()
4663 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; in mtk_add_mac()
4665 eth->netdev[id]->irq = eth->irq[0]; in mtk_add_mac()
4666 eth->netdev[id]->dev.of_node = np; in mtk_add_mac()
4668 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_add_mac()
4669 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; in mtk_add_mac()
4671 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; in mtk_add_mac()
4673 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_add_mac()
4678 if (mtk_page_pool_enabled(eth)) in mtk_add_mac()
4679 eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC | in mtk_add_mac()
4687 free_netdev(eth->netdev[id]); in mtk_add_mac()
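
The alloc_etherdev_mqs() fragment above depends on the capability check two lines earlier: QDMA-capable SoCs get one netdev TX queue per hardware QDMA queue, older SoCs get a single queue. A hedged sketch, with MTK_QDMA_NUM_QUEUES assumed from mtk_eth_soc.h:

	int txqs = 1;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		txqs = MTK_QDMA_NUM_QUEUES;

	/* sizeof(*mac) of private data, txqs TX queues, 1 RX queue */
	eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
	if (!eth->netdev[id])
		return -ENOMEM;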
4691 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev) in mtk_eth_set_dma_device() argument
4700 dev = eth->netdev[i]; in mtk_eth_set_dma_device()
4710 eth->dma_dev = dma_dev; in mtk_eth_set_dma_device()
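
mtk_eth_set_dma_device() lets the WED offload code re-parent DMA mappings, which is only safe while no port is passing traffic, so every running netdev is closed around the swap. A hedged reconstruction of the pattern under RTNL:

	void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
	{
		struct net_device *dev, *tmp;
		LIST_HEAD(dev_list);
		int i;

		rtnl_lock();

		/* collect every port that is currently up */
		for (i = 0; i < MTK_MAX_DEVS; i++) {
			dev = eth->netdev[i];
			if (!dev || !(dev->flags & IFF_UP))
				continue;
			list_add_tail(&dev->close_list, &dev_list);
		}

		dev_close_many(&dev_list, false);

		/* swap the device used for DMA mappings, then reopen */
		eth->dma_dev = dma_dev;

		list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
			list_del_init(&dev->close_list);
			dev_open(dev, NULL);
		}

		rtnl_unlock();
	}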
4720 static int mtk_sgmii_init(struct mtk_eth *eth) in mtk_sgmii_init() argument
4728 np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i); in mtk_sgmii_init()
4742 eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap, in mtk_sgmii_init()
4743 eth->soc->ana_rgc3, in mtk_sgmii_init()
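
mtk_sgmii_init() walks the mediatek,sgmiisys phandles and hands each syscon regmap to the shared LynxI PCS driver; ana_rgc3 is the SoC-specific analog register offset. A sketch, where the pnswap flag handling is an assumption based on the LynxI PCS API:

	static int mtk_sgmii_init(struct mtk_eth *eth)
	{
		struct device_node *np;
		struct regmap *regmap;
		u32 flags;
		int i;

		for (i = 0; i < MTK_MAX_DEVS; i++) {
			np = of_parse_phandle(eth->dev->of_node,
					      "mediatek,sgmiisys", i);
			if (!np)
				break;

			regmap = syscon_node_to_regmap(np);
			flags = of_property_read_bool(np, "mediatek,pnswap") ?
				MTK_SGMII_FLAG_PN_SWAP : 0;
			of_node_put(np);

			if (IS_ERR(regmap))
				return PTR_ERR(regmap);

			eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
								 eth->soc->ana_rgc3,
								 flags);
		}

		return 0;
	}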
4754 struct mtk_eth *eth; in mtk_probe() local
4757 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); in mtk_probe()
4758 if (!eth) in mtk_probe()
4761 eth->soc = of_device_get_match_data(&pdev->dev); in mtk_probe()
4763 eth->dev = &pdev->dev; in mtk_probe()
4764 eth->dma_dev = &pdev->dev; in mtk_probe()
4765 eth->base = devm_platform_ioremap_resource(pdev, 0); in mtk_probe()
4766 if (IS_ERR(eth->base)) in mtk_probe()
4767 return PTR_ERR(eth->base); in mtk_probe()
4769 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_probe()
4770 eth->ip_align = NET_IP_ALIGN; in mtk_probe()
4772 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) { in mtk_probe()
4777 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_probe()
4778 eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1); in mtk_probe()
4779 if (IS_ERR(eth->sram_base)) in mtk_probe()
4780 return PTR_ERR(eth->sram_base); in mtk_probe()
4782 eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET; in mtk_probe()
4786 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { in mtk_probe()
4797 	spin_lock_init(&eth->page_lock); in mtk_probe()
4798 	spin_lock_init(&eth->tx_irq_lock); in mtk_probe()
4799 	spin_lock_init(&eth->rx_irq_lock); in mtk_probe()
4800 	spin_lock_init(&eth->dim_lock); in mtk_probe()
4802 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
4803 	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx); in mtk_probe()
4804 	INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work); in mtk_probe()
4806 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
4807 	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx); in mtk_probe()
4809 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
4810 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4812 if (IS_ERR(eth->ethsys)) { in mtk_probe()
4814 return PTR_ERR(eth->ethsys); in mtk_probe()
4818 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) { in mtk_probe()
4819 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4821 if (IS_ERR(eth->infra)) { in mtk_probe()
4823 return PTR_ERR(eth->infra); in mtk_probe()
4837 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { in mtk_probe()
4838 err = mtk_sgmii_init(eth); in mtk_probe()
4844 if (eth->soc->required_pctl) { in mtk_probe()
4845 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4847 if (IS_ERR(eth->pctl)) { in mtk_probe()
4849 err = PTR_ERR(eth->pctl); in mtk_probe()
4854 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_probe()
4860 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) { in mtk_probe()
4861 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_probe()
4867 eth->phy_scratch_ring = res_sram->start; in mtk_probe()
4869 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET; in mtk_probe()
4874 if (eth->soc->offload_version) { in mtk_probe()
4880 if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base)) in mtk_probe()
4888 wdma_base = eth->soc->reg_map->wdma_base[i]; in mtk_probe()
4890 mtk_wed_add_hw(np, eth, eth->base + wdma_base, in mtk_probe()
4896 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) in mtk_probe()
4897 eth->irq[i] = eth->irq[0]; in mtk_probe()
4899 eth->irq[i] = platform_get_irq(pdev, i); in mtk_probe()
4900 if (eth->irq[i] < 0) { in mtk_probe()
4906 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { in mtk_probe()
4907 eth->clks[i] = devm_clk_get(eth->dev, in mtk_probe()
4909 if (IS_ERR(eth->clks[i])) { in mtk_probe()
4910 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) { in mtk_probe()
4914 if (eth->soc->required_clks & BIT(i)) { in mtk_probe()
4920 eth->clks[i] = NULL; in mtk_probe()
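
The clock loop above treats every clock as optional unless the SoC's required_clks bitmap says otherwise: -EPROBE_DEFER always propagates, a missing required clock fails the probe, and a missing optional clock is NULLed so later clk_* calls become no-ops. A condensed sketch (mtk_clks_source_name assumed as the driver's clock-name table; the real code unwinds through a goto label rather than returning directly):

	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev, mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			if (eth->soc->required_clks & BIT(i)) {
				dev_err(&pdev->dev, "clock %s not found\n",
					mtk_clks_source_name[i]);
				return -EINVAL;
			}
			eth->clks[i] = NULL;	/* optional: clk_* become no-ops */
		}
	}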
4924 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); in mtk_probe()
4925 	INIT_WORK(&eth->pending_work, mtk_pending_work); in mtk_probe()
4927 err = mtk_hw_init(eth, false); in mtk_probe()
4931 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); in mtk_probe()
4941 err = mtk_add_mac(eth, mac_np); in mtk_probe()
4948 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) { in mtk_probe()
4949 err = devm_request_irq(eth->dev, eth->irq[0], in mtk_probe()
4951 dev_name(eth->dev), eth); in mtk_probe()
4953 err = devm_request_irq(eth->dev, eth->irq[1], in mtk_probe()
4955 dev_name(eth->dev), eth); in mtk_probe()
4959 err = devm_request_irq(eth->dev, eth->irq[2], in mtk_probe()
4961 dev_name(eth->dev), eth); in mtk_probe()
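
On MTK_SHARED_INT hardware a single interrupt line serves both directions, so only irq[0] is requested with a combined handler; otherwise TX and RX get their own vectors on irq[1] and irq[2]. A sketch with the handler names (mtk_handle_irq, mtk_handle_irq_tx, mtk_handle_irq_rx) assumed, since the listing elides them:

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
		err = devm_request_irq(eth->dev, eth->irq[0], mtk_handle_irq,
				       0, dev_name(eth->dev), eth);
	} else {
		err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx,
				       0, dev_name(eth->dev), eth);
		if (err)
			goto err_free_dev;
		err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx,
				       0, dev_name(eth->dev), eth);
	}
	if (err)
		goto err_free_dev;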
4967 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
4968 err = mtk_mdio_init(eth); in mtk_probe()
4973 if (eth->soc->offload_version) { in mtk_probe()
4974 u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1; in mtk_probe()
4976 num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe); in mtk_probe()
4978 u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400; in mtk_probe()
4980 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i); in mtk_probe()
4982 if (!eth->ppe[i]) { in mtk_probe()
4988 err = mtk_eth_offload_init(eth); in mtk_probe()
4994 if (!eth->netdev[i]) in mtk_probe()
4997 err = register_netdev(eth->netdev[i]); in mtk_probe()
4999 dev_err(eth->dev, "error bringing up device\n"); in mtk_probe()
5002 netif_info(eth, probe, eth->netdev[i], in mtk_probe()
5004 eth->netdev[i]->base_addr, eth->irq[0]); in mtk_probe()
5010 	init_dummy_netdev(&eth->dummy_dev); in mtk_probe()
5011 	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx); in mtk_probe()
5012 	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx); in mtk_probe()
5014 platform_set_drvdata(pdev, eth); in mtk_probe()
5015 	schedule_delayed_work(&eth->reset.monitor_work, in mtk_probe()
5021 mtk_ppe_deinit(eth); in mtk_probe()
5022 mtk_mdio_cleanup(eth); in mtk_probe()
5024 mtk_free_dev(eth); in mtk_probe()
5026 mtk_hw_deinit(eth); in mtk_probe()
5030 mtk_sgmii_destroy(eth); in mtk_probe()
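
The error labels glimpsed at the end of mtk_probe() unwind in reverse order of setup: PPE and MDIO first, then the netdevs, then the hardware blocks, WED, and finally the SGMII PCS instances. A condensed sketch of the ladder, with label names assumed to match the fragments:

	err_deinit_ppe:
		mtk_ppe_deinit(eth);
		mtk_mdio_cleanup(eth);
	err_free_dev:
		mtk_free_dev(eth);
	err_deinit_hw:
		mtk_hw_deinit(eth);
	err_wed_exit:
		mtk_wed_exit();
	err_destroy_sgmii:
		mtk_sgmii_destroy(eth);

		return err;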
5037 struct mtk_eth *eth = platform_get_drvdata(pdev); in mtk_remove() local
5043 if (!eth->netdev[i]) in mtk_remove()
5045 mtk_stop(eth->netdev[i]); in mtk_remove()
5046 mac = netdev_priv(eth->netdev[i]); in mtk_remove()
5051 mtk_hw_deinit(eth); in mtk_remove()
5053 	netif_napi_del(&eth->tx_napi); in mtk_remove()
5054 	netif_napi_del(&eth->rx_napi); in mtk_remove()
5055 mtk_cleanup(eth); in mtk_remove()
5056 mtk_mdio_cleanup(eth); in mtk_remove()
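
mtk_remove() stops every port first so DMA is quiescent before anything else is torn down. A reconstruction with the lines elided from the listing filled in as assumptions (phylink_disconnect_phy() after each mtk_stop(), mtk_wed_exit() before mtk_hw_deinit(), and the modern void-returning platform remove callback):

	static void mtk_remove(struct platform_device *pdev)
	{
		struct mtk_eth *eth = platform_get_drvdata(pdev);
		struct mtk_mac *mac;
		int i;

		/* stop all devices to make sure that dma is properly shut down */
		for (i = 0; i < MTK_MAX_DEVS; i++) {
			if (!eth->netdev[i])
				continue;
			mtk_stop(eth->netdev[i]);
			mac = netdev_priv(eth->netdev[i]);
			phylink_disconnect_phy(mac->phylink);
		}

		mtk_wed_exit();
		mtk_hw_deinit(eth);

		netif_napi_del(&eth->tx_napi);
		netif_napi_del(&eth->rx_napi);
		mtk_cleanup(eth);
		mtk_mdio_cleanup(eth);
	}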