
// SPDX-License-Identifier: GPL-2.0-only
 * Copyright (c) 2014-2024 Broadcom
#include <linux/dma-mapping.h>
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
	 * peripheral registers for CPU-native byte order.
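
/* A sketch of the accessor this comment belongs to, reconstructed from the
 * driver (treat the exact Kconfig predicates as an assumption): big-endian
 * MIPS platforms get the raw accessor because the bus already presents the
 * registers in CPU-native order, everyone else uses the relaxed MMIO helper.
 */
static inline void bcmgenet_writel(u32 value, void __iomem *offset)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		__raw_writel(value, offset);
	else
		writel_relaxed(value, offset);
}
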
	 * the platform is explicitly configured for 64-bits/LPAE.
	if (priv->hw_params->flags & GENET_HAS_40BITS)
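
/* Sketch of the surrounding dmadesc_set_addr() logic (the
 * CONFIG_PHYS_ADDR_T_64BIT guard is an assumption about the build): the low
 * 32 bits are always written, while the expensive per-packet GISB write of
 * the high word is skipped unless the hardware really does 40-bit DMA.
 */
static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d, dma_addr_t addr)
{
	bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}
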
	return bcmgenet_readl(priv->base +
			      priv->hw_params->tbuf_offset + TBUF_CTRL);
	bcmgenet_writel(val, priv->base +
			priv->hw_params->tbuf_offset + TBUF_CTRL);
	return bcmgenet_readl(priv->base +
			      priv->hw_params->tbuf_offset + TBUF_BP_MC);
	bcmgenet_writel(val, priv->base +
			priv->hw_params->tbuf_offset + TBUF_BP_MC);
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
	/* GENET v4 supports 40-bits pointer addressing
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
		((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
		size--;
		return -EINVAL;
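
/* The two statements above belong to bcmgenet_hfb_validate_mask(); a hedged
 * reconstruction follows. The HFB compares at nibble granularity, so each
 * mask byte must enable whole nibbles (0x00, 0x0f, 0xf0 or 0xff); any other
 * value cannot be programmed into the filter and is rejected.
 */
static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
{
	while (size) {
		switch (*(unsigned char *)mask++) {
		case 0x00:
		case 0x0f:
		case 0xf0:
		case 0xff:
			size--;
			continue;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
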
	index = f_index * priv->hw_params->hfb_filter_size + offset / 2;
	while (size--) {
	struct ethtool_rx_flow_spec *fs = &rule->fs;
	f = fs->location;
	if (fs->flow_type & FLOW_MAC_EXT) {
				 &fs->h_ext.h_dest, &fs->m_ext.h_dest,
				 sizeof(fs->h_ext.h_dest));
	if (fs->flow_type & FLOW_EXT) {
		if (fs->m_ext.vlan_etype ||
		    fs->m_ext.vlan_tci) {
					 &fs->h_ext.vlan_etype,
					 &fs->m_ext.vlan_etype,
					 sizeof(fs->h_ext.vlan_etype));
					 &fs->h_ext.vlan_tci,
					 &fs->m_ext.vlan_tci,
					 sizeof(fs->h_ext.vlan_tci));
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
				 &fs->h_u.ether_spec.h_dest,
				 &fs->m_u.ether_spec.h_dest,
				 sizeof(fs->h_u.ether_spec.h_dest));
				 &fs->h_u.ether_spec.h_source,
				 &fs->m_u.ether_spec.h_source,
				 sizeof(fs->h_u.ether_spec.h_source));
				 &fs->h_u.ether_spec.h_proto,
				 &fs->m_u.ether_spec.h_proto,
				 sizeof(fs->h_u.ether_spec.h_proto));
				 &fs->h_u.usr_ip4_spec.tos,
				 &fs->m_u.usr_ip4_spec.tos,
				 sizeof(fs->h_u.usr_ip4_spec.tos));
				 &fs->h_u.usr_ip4_spec.proto,
				 &fs->m_u.usr_ip4_spec.proto,
				 sizeof(fs->h_u.usr_ip4_spec.proto));
				 &fs->h_u.usr_ip4_spec.ip4src,
				 &fs->m_u.usr_ip4_spec.ip4src,
				 sizeof(fs->h_u.usr_ip4_spec.ip4src));
				 &fs->h_u.usr_ip4_spec.ip4dst,
				 &fs->m_u.usr_ip4_spec.ip4dst,
				 sizeof(fs->h_u.usr_ip4_spec.ip4dst));
		if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
		size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
				 &fs->h_u.usr_ip4_spec.l4_4_bytes,
				 &fs->m_u.usr_ip4_spec.l4_4_bytes,
	if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
		rule->state = BCMGENET_RXNFC_STATE_DISABLED;
			   fs->ring_cookie);
		rule->state = BCMGENET_RXNFC_STATE_ENABLED;
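
/* Illustration only (hypothetical helper, not part of the driver): the kind
 * of ethtool_rx_flow_spec that bcmgenet_hfb_create_rxnfc_filter() consumes,
 * matching one destination MAC and steering it to Rx queue 1. User space
 * would request roughly the same thing with:
 *   ethtool -N eth0 flow-type ether dst 00:10:18:aa:bb:cc action 1
 */
static void example_fill_flow_spec(struct ethtool_rx_flow_spec *fs,
				   const u8 *dmac)
{
	memset(fs, 0, sizeof(*fs));
	fs->flow_type = ETHER_FLOW;
	memcpy(fs->h_u.ether_spec.h_dest, dmac, ETH_ALEN);
	memset(fs->m_u.ether_spec.h_dest, 0xff, ETH_ALEN);	/* full match */
	fs->ring_cookie = 1;			/* deliver to Rx queue 1 */
	fs->location = RX_CLS_LOC_ANY;		/* let the driver pick a slot */
}
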
	base = f_index * priv->hw_params->hfb_filter_size;
	for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
	for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
	INIT_LIST_HEAD(&priv->rxnfc_list);
		INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
		priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
	return clk_prepare_enable(priv->clk);
	clk_disable_unprepare(priv->clk);
		return -EINVAL;
	if (!dev->phydev)
		return -ENODEV;
	phy_ethtool_ksettings_get(dev->phydev, cmd);
		return -EINVAL;
	if (!dev->phydev)
		return -ENODEV;
	return phy_ethtool_ksettings_set(dev->phydev, cmd);
	ret = clk_prepare_enable(priv->clk);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
	clk_disable_unprepare(priv->clk);
	return priv->msg_enable;
	priv->msg_enable = level;
				  struct ethtool_coalesce *ec,
	ec->tx_max_coalesced_frames =
	ec->rx_max_coalesced_frames =
	ec->rx_coalesce_usecs =
	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ring = &priv->rx_rings[i];
		ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
	ring = &priv->rx_rings[DESC_INDEX];
	ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
	struct bcmgenet_priv *priv = ring->priv;
	unsigned int i = ring->index;
					  struct ethtool_coalesce *ec)
	ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = ring->rx_coalesce_usecs;
	pkts = ring->rx_max_coalesced_frames;
	if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
	ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
				  struct ethtool_coalesce *ec,
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;
	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;
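
/* Sketch of where the validated value ends up (the register name and the
 * 8.192 us tick are assumptions consistent with the bound checked above,
 * roughly DMA_TIMEOUT_MASK * 8 microseconds): the timeout is scaled to
 * ticks and rounded up so a non-zero request never truncates to zero.
 */
static void example_write_rx_timeout(struct bcmgenet_priv *priv,
				     unsigned int index, u32 usecs)
{
	u32 reg;

	reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + index);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192);	/* ns per 8192 ns tick */
	bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + index);
}
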
	 * ethtool knob to do coalescing on a per-queue basis
	for (i = 0; i < priv->hw_params->tx_queues; i++)
					  ec->tx_max_coalesced_frames,
					  ec->tx_max_coalesced_frames,
	for (i = 0; i < priv->hw_params->rx_queues; i++)
		bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
	bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);
	epause->autoneg = priv->autoneg_pause;
		epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE);
		epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE);
		epause->tx_pause = priv->tx_pause;
		epause->rx_pause = priv->rx_pause;
	if (!dev->phydev)
		return -ENODEV;
	if (!phy_validate_pause(dev->phydev, epause))
		return -EINVAL;
	priv->autoneg_pause = !!epause->autoneg;
	priv->tx_pause = !!epause->tx_pause;
	priv->rx_pause = !!epause->rx_pause;
	bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
	BCMGENET_STAT_NETDEV = -1,
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	strscpy(info->driver, "bcmgenet", sizeof(info->driver));
		return -EOPNOTSUPP;
		switch (s->type) {
			val = bcmgenet_umac_readl(priv, s->reg_offset);
						  s->reg_offset);
						  s->reg_offset);
			j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
	dev->netdev_ops->ndo_get_stats(dev);
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		p += s->stat_offset;
		    s->stat_sizeof == sizeof(unsigned long))
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	reg = bcmgenet_readl(priv->base + off);
	bcmgenet_writel(reg, priv->base + off);
	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
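
/* Note the ordering in bcmgenet_eee_enable_set() above: the dedicated EEE
 * clock is ungated before TBUF_ENERGY_CTRL is read-modify-written, and on
 * the disable path it is only gated again after the register writes, so
 * the block is never touched while unclocked.
 */
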
	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
	priv->eee.tx_lpi_enabled = tx_lpi_enabled;
	struct ethtool_eee *p = &priv->eee;
		return -EOPNOTSUPP;
	if (!dev->phydev)
		return -ENODEV;
	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_enabled = p->tx_lpi_enabled;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
	return phy_ethtool_get_eee(dev->phydev, e);
	struct ethtool_eee *p = &priv->eee;
		return -EOPNOTSUPP;
	if (!dev->phydev)
		return -ENODEV;
	p->eee_enabled = e->eee_enabled;
	if (!p->eee_enabled) {
		p->eee_active = phy_init_eee(dev->phydev, false) >= 0;
		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled);
	return phy_ethtool_set_eee(dev->phydev, e);
	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES &&
	    cmd->fs.location != RX_CLS_LOC_ANY) {
			   cmd->fs.location);
		return -EINVAL;
	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
		l4_mask = &cmd->fs.m_u.usr_ip4_spec;
		if (VALIDATE_MASK(l4_mask->ip4src) ||
		    VALIDATE_MASK(l4_mask->ip4dst) ||
		    VALIDATE_MASK(l4_mask->l4_4_bytes) ||
		    VALIDATE_MASK(l4_mask->proto) ||
		    VALIDATE_MASK(l4_mask->ip_ver) ||
		    VALIDATE_MASK(l4_mask->tos)) {
			return -EINVAL;
		eth_mask = &cmd->fs.m_u.ether_spec;
		if (VALIDATE_MASK(eth_mask->h_dest) ||
		    VALIDATE_MASK(eth_mask->h_source) ||
		    VALIDATE_MASK(eth_mask->h_proto)) {
			return -EINVAL;
			   cmd->fs.flow_type);
		return -EINVAL;
	if ((cmd->fs.flow_type & FLOW_EXT)) {
		if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
		    VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {
			return -EINVAL;
		if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
			netdev_err(dev, "rxnfc: user-def not supported\n");
			return -EINVAL;
	if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {
		if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {
			return -EINVAL;
	if (priv->hw_params->hfb_filter_size < 128) {
		return -EINVAL;
	if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
	    cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
			   cmd->fs.ring_cookie);
		return -EINVAL;
	if (cmd->fs.location == RX_CLS_LOC_ANY) {
		list_for_each_entry(loc_rule, &priv->rxnfc_list, list) {
			cmd->fs.location = loc_rule->fs.location;
			err = memcmp(&loc_rule->fs, &cmd->fs,
			loc_rule = &priv->rxnfc_rules[i];
			if (loc_rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
				cmd->fs.location = i;
			cmd->fs.location = RX_CLS_LOC_ANY;
			return -ENOSPC;
		loc_rule = &priv->rxnfc_rules[cmd->fs.location];
	if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
	if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
		list_del(&loc_rule->list);
		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
	loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
	memcpy(&loc_rule->fs, &cmd->fs,
	list_add_tail(&loc_rule->list, &priv->rxnfc_list);
	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;
	rule = &priv->rxnfc_rules[cmd->fs.location];
	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
		err = -ENOENT;
	if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
	if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
		list_del(&rule->list);
		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
	rule->state = BCMGENET_RXNFC_STATE_UNUSED;
	memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
	switch (cmd->cmd) {
		netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n",
			    cmd->cmd);
		return -EINVAL;
		return -EINVAL;
	rule = &priv->rxnfc_rules[loc];
	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED)
		err = -ENOENT;
		memcpy(&cmd->fs, &rule->fs,
	list_for_each(pos, &priv->rxnfc_list)
	switch (cmd->cmd) {
		cmd->data = priv->hw_params->rx_queues ?: 1;
		cmd->rule_cnt = bcmgenet_get_num_flows(priv);
		cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL;
		err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
		list_for_each_entry(rule, &priv->rxnfc_list, list)
			if (i < cmd->rule_cnt)
				rule_locs[i++] = rule->fs.location;
		cmd->rule_cnt = i;
		cmd->data = MAX_NUM_OF_FS_RULES;
		err = -EOPNOTSUPP;
		phy_detach(priv->dev->phydev);
	if (priv->hw_params->flags & GENET_HAS_EXT) {
		if (GENET_IS_V5(priv) && !priv->ephy_16nm)
		bcmgenet_phy_power_set(priv->dev, false);
	if (!(priv->hw_params->flags & GENET_HAS_EXT))
	if (GENET_IS_V5(priv) && !priv->ephy_16nm) {
		bcmgenet_phy_power_set(priv->dev, true);
	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
		ring->write_ptr++;
	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
	if (ring->write_ptr == ring->cb_ptr)
		ring->write_ptr = ring->end_ptr;
		ring->write_ptr--;
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
	skb = cb->skb;
		cb->skb = NULL;
		if (cb == GENET_CB(skb)->first_cb)
		if (cb == GENET_CB(skb)->last_cb)
	skb = cb->skb;
	cb->skb = NULL;
	if (ring->index == DESC_INDEX)
		bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
	txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);
		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
					  &priv->tx_cbs[ring->clean_ptr]);
			bytes_compl += GENET_CB(skb)->bytes_sent;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
			ring->clean_ptr = ring->cb_ptr;
	ring->free_bds += txbds_processed;
	ring->c_index = c_index;
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
	spin_lock_bh(&ring->lock);
	spin_unlock_bh(&ring->lock);
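
/* The consumer-index math above is 16-bit modular arithmetic: the hardware
 * index wraps at DMA_C_INDEX_MASK + 1, so subtract-and-mask yields the
 * number of newly completed descriptors even across a wrap. Worked example
 * (values hypothetical):
 *   c_index = 0x0003, ring->c_index = 0xfffe
 *   (0x0003 - 0xfffe) & 0xffff = 0x0005  ->  5 Tx BDs ready to reclaim
 */
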
	spin_lock(&ring->lock);
	work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
	spin_unlock(&ring->lock);
		ring->int_enable(ring);
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
			priv->mib.tx_realloc_tsb_failed++;
			dev->stats.tx_dropped++;
		priv->mib.tx_realloc_tsb++;
	status = (struct status_64 *)skb->data;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
			ip_proto = ip_hdr(skb)->protocol;
			ip_proto = ipv6_hdr(skb)->nexthdr;
		offset = skb_checksum_start_offset(skb) - sizeof(*status);
			       (offset + skb->csum_offset) |
		status->tx_csum_info = tx_csum_info;
	struct device *kdev = &priv->pdev->dev;
		index -= 1;
	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);
	nr_frags = skb_shinfo(skb)->nr_frags;
	spin_lock(&ring->lock);
	if (ring->free_bds <= (nr_frags + 1)) {
	GENET_CB(skb)->bytes_sent = skb->len;
			GENET_CB(skb)->first_cb = tx_cb_ptr;
			mapping = dma_map_single(kdev, skb->data, size,
			frag = &skb_shinfo(skb)->frags[i - 1];
			priv->mib.tx_dma_failed++;
		tx_cb_ptr->skb = skb;
			   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
		dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
	GENET_CB(skb)->last_cb = tx_cb_ptr;
	ring->free_bds -= nr_frags + 1;
	ring->prod_index += nr_frags + 1;
	ring->prod_index &= DMA_P_INDEX_MASK;
	netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);
	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		bcmgenet_tdma_ring_writel(priv, ring->index,
					  ring->prod_index, TDMA_PROD_INDEX);
	spin_unlock(&ring->lock);
	while (i-- > 0) {
	struct device *kdev = &priv->pdev->dev;
	skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, priv->dev,
	/* DMA-map the new Rx skb */
	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
		priv->mib.rx_dma_failed++;
		netif_err(priv, rx_err, priv->dev,
	/* Grab the current Rx skb from the ring and DMA-unmap it */
	cb->skb = skb;
	dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
	dmadesc_set_addr(priv, cb->bd_addr, mapping);
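
/* Refill pattern shown above: the replacement skb is allocated and
 * DMA-mapped *before* the old one is pulled off the ring, so the descriptor
 * is never left without a buffer; the unmapped old skb is what gets
 * returned to the caller for processing. If allocation or mapping fails,
 * the old buffer stays on the ring and the frame is counted as dropped.
 */
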
/* bcmgenet_desc_rx - descriptor based rx process.
	struct bcmgenet_priv *priv = ring->priv;
	struct net_device *dev = priv->dev;
	if (ring->index == DESC_INDEX) {
		mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
	if (discards > ring->old_discards) {
		discards = discards - ring->old_discards;
		ring->errors += discards;
		ring->old_discards += discards;
		/* Clear HW register when we reach 75% of maximum 0xFFFF */
		if (ring->old_discards >= 0xC000) {
			ring->old_discards = 0;
			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
	rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
		cb = &priv->rx_cbs[ring->read_ptr];
			ring->dropped++;
		status = (struct status_64 *)skb->data;
		dma_length_status = status->length_status;
		if (dev->features & NETIF_F_RXCSUM) {
			rx_csum = (__force __be16)(status->rx_csum & 0xffff);
				skb->csum = (__force __wsum)ntohs(rx_csum);
				skb->ip_summed = CHECKSUM_COMPLETE;
			  __func__, p_index, ring->c_index,
			  ring->read_ptr, dma_length_status);
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			ring->errors++;
				dev->stats.rx_crc_errors++;
				dev->stats.rx_over_errors++;
				dev->stats.rx_frame_errors++;
				dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
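
/* Context for the "len -= 66" below (an inference from the driver layout):
 * each received frame is prefixed by a 64-byte hardware status block
 * (struct status_64) plus 2 bytes of padding that keep the IP header
 * aligned, so 64 + 2 = 66 bytes are stripped before the frame goes up the
 * stack.
 */
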
		len -= 66;
		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		skb->protocol = eth_type_trans(skb, priv->dev);
		ring->packets++;
		ring->bytes += len;
			dev->stats.multicast++;
		napi_gro_receive(&ring->napi, skb);
		if (likely(ring->read_ptr < ring->end_ptr))
			ring->read_ptr++;
			ring->read_ptr = ring->cb_ptr;
		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
		ring->dim.bytes = bytes_processed;
		ring->dim.packets = rxpktprocessed;
		ring->int_enable(ring);
	if (ring->dim.use_dim) {
		dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
				  ring->dim.bytes, &dim_sample);
		net_dim(&ring->dim.dim, dim_sample);
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	dim->state = DIM_START_MEASURE;
	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
	for (i = 0; i < ring->size; i++) {
		cb = ring->cbs + i;
		if (!cb->skb)
			return -ENOMEM;
	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
	spin_lock_bh(&priv->reg_lock);
	spin_unlock_bh(&priv->reg_lock);
	spin_unlock_bh(&priv->reg_lock);
	/* UniMAC stops on a packet boundary, wait for a full-size packet
	 * to be processed
	 */
	spin_lock_bh(&priv->reg_lock);
	spin_unlock_bh(&priv->reg_lock);
	if (priv->internal_phy) {
	} else if (priv->ext_phy) {
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
	struct device *kdev = &priv->pdev->dev;
	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
	 * a valid CHK bit to be set in the per-packet status word
	if (priv->crc_fwd_en)
	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg |= BIT(priv->hw_params->bp_in_en_shift);
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
			reg &= ~priv->hw_params->bp_in_mask;
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
	struct bcmgenet_net_dim *dim = &ring->dim;
	INIT_WORK(&dim->dim.work, cb);
	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	dim->event_ctr = 0;
	dim->packets = 0;
	dim->bytes = 0;
	struct bcmgenet_net_dim *dim = &ring->dim;
	usecs = ring->rx_coalesce_usecs;
	pkts = ring->rx_max_coalesced_frames;
	/* If DIM was enabled, re-apply default parameters */
	if (dim->use_dim) {
		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	ring->index = index;
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	ring->cbs = priv->tx_cbs + start_ptr;
	ring->size = size;
	ring->clean_ptr = start_ptr;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
	netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll);
	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
	ring->priv = priv;
	ring->index = index;
		ring->int_enable = bcmgenet_rx_ring16_int_enable;
		ring->int_disable = bcmgenet_rx_ring16_int_disable;
		ring->int_enable = bcmgenet_rx_ring_int_enable;
		ring->int_disable = bcmgenet_rx_ring_int_disable;
	ring->cbs = priv->rx_cbs + start_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->read_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;
	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll);
	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_enable(&ring->napi);
		ring->int_enable(ring);
	ring = &priv->tx_rings[DESC_INDEX];
	napi_enable(&ring->napi);
	ring->int_enable(ring);
	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_disable(&ring->napi);
	ring = &priv->tx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		netif_napi_del(&ring->napi);
	ring = &priv->tx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
 * Queues 0-3 are priority-based, each one has 32 descriptors,
 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
 * - Tx queue 0 uses tx_cbs[0..31]
 * - Tx queue 1 uses tx_cbs[32..63]
 * - Tx queue 2 uses tx_cbs[64..95]
 * - Tx queue 3 uses tx_cbs[96..127]
 * - Tx queue 16 uses tx_cbs[128..255]
	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
				      i * priv->hw_params->tx_bds_per_q,
				      (i + 1) * priv->hw_params->tx_bds_per_q);
			      priv->hw_params->tx_queues *
			      priv->hw_params->tx_bds_per_q,
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		napi_enable(&ring->napi);
		ring->int_enable(ring);
	ring = &priv->rx_rings[DESC_INDEX];
	napi_enable(&ring->napi);
	ring->int_enable(ring);
	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		napi_disable(&ring->napi);
		cancel_work_sync(&ring->dim.dim.work);
	ring = &priv->rx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
	cancel_work_sync(&ring->dim.dim.work);
	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		netif_napi_del(&ring->napi);
	ring = &priv->rx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
 * used to direct traffic to these queues.
	for (i = 0; i < priv->hw_params->rx_queues; i++) {
					 priv->hw_params->rx_bds_per_q,
					 i * priv->hw_params->rx_bds_per_q,
					 priv->hw_params->rx_bds_per_q);
			      priv->hw_params->rx_queues *
			      priv->hw_params->rx_bds_per_q,
	/* Configure ring as descriptor ring and re-enable DMA if enabled */
		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
		ret = -ETIMEDOUT;
		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
		ret = -ETIMEDOUT;
	for (i = 0; i < priv->hw_params->rx_queues; i++)
	for (i = 0; i < priv->hw_params->tx_queues; i++)
	for (i = 0; i < priv->num_tx_bds; i++)
		dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
						  priv->tx_cbs + i));
	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
	txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
	kfree(priv->rx_cbs);
	kfree(priv->tx_cbs);
	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
	if (!priv->rx_cbs)
		return -ENOMEM;
	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
	priv->num_tx_bds = TOTAL_DESC;
	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
	if (!priv->tx_cbs) {
		kfree(priv->rx_cbs);
		return -ENOMEM;
	for (i = 0; i < priv->num_tx_bds; i++) {
		cb = priv->tx_cbs + i;
		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
	bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
	ret = bcmgenet_init_rx_queues(priv->dev);
		netdev_err(priv->dev, "failed to initialize Rx queues\n");
		kfree(priv->rx_cbs);
		kfree(priv->tx_cbs);
	bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
	bcmgenet_init_tx_queues(priv->dev);
	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
	spin_lock_irq(&priv->lock);
	status = priv->irq0_stat;
	priv->irq0_stat = 0;
	spin_unlock_irq(&priv->lock);
	    priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
		phy_init_hw(priv->dev->phydev);
		genphy_config_aneg(priv->dev->phydev);
		phy_mac_interrupt(priv->dev->phydev);
	netif_dbg(priv, intr, priv->dev,
	for (index = 0; index < priv->hw_params->rx_queues; index++) {
		rx_ring = &priv->rx_rings[index];
		rx_ring->dim.event_ctr++;
		if (likely(napi_schedule_prep(&rx_ring->napi))) {
			rx_ring->int_disable(rx_ring);
			__napi_schedule_irqoff(&rx_ring->napi);
	for (index = 0; index < priv->hw_params->tx_queues; index++) {
		tx_ring = &priv->tx_rings[index];
		if (likely(napi_schedule_prep(&tx_ring->napi))) {
			tx_ring->int_disable(tx_ring);
			__napi_schedule_irqoff(&tx_ring->napi);
	netif_dbg(priv, intr, priv->dev,
		rx_ring = &priv->rx_rings[DESC_INDEX];
		rx_ring->dim.event_ctr++;
		if (likely(napi_schedule_prep(&rx_ring->napi))) {
			rx_ring->int_disable(rx_ring);
			__napi_schedule_irqoff(&rx_ring->napi);
		tx_ring = &priv->tx_rings[DESC_INDEX];
		if (likely(napi_schedule_prep(&tx_ring->napi))) {
			tx_ring->int_disable(tx_ring);
			__napi_schedule_irqoff(&tx_ring->napi);
	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
		wake_up(&priv->wq);
	/* Save irq status for bottom-half processing. */
	spin_lock_irqsave(&priv->lock, flags);
	priv->irq0_stat |= status;
	spin_unlock_irqrestore(&priv->lock, flags);
	schedule_work(&priv->bcmgenet_irq_work);
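
/* Top/bottom-half split visible above: the hard IRQ handler only ORs the
 * pending bits into priv->irq0_stat under priv->lock and schedules
 * bcmgenet_irq_work; bcmgenet_irq_task() later snapshots and clears that
 * accumulator under the same lock before acting on it, so events are not
 * lost when several interrupts fire before the work item runs.
 */
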
	disable_irq(priv->irq0);
	bcmgenet_isr0(priv->irq0, priv);
	enable_irq(priv->irq0);
	disable_irq(priv->irq1);
	bcmgenet_isr1(priv->irq1, priv);
	enable_irq(priv->irq1);
	for (i = 0; i < priv->hw_params->tx_queues; i++)
	for (i = 0; i < priv->hw_params->rx_queues; i++)
	phy_start(dev->phydev);
	clk_prepare_enable(priv->clk);
	if (priv->internal_phy)
	bcmgenet_set_features(dev, dev->features);
	bcmgenet_set_hw_addr(priv, dev->dev_addr);
	/* Always enable ring 16 - descriptor ring */
	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
			  dev->name, priv);
		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
			  dev->name, priv);
		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
	bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
	free_irq(priv->irq1, priv);
	free_irq(priv->irq0, priv);
	if (priv->internal_phy)
	clk_disable_unprepare(priv->clk);
	phy_stop(dev->phydev);
	cancel_work_sync(&priv->bcmgenet_irq_work);
	phy_disconnect(dev->phydev);
	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);
	if (priv->internal_phy)
	clk_disable_unprepare(priv->clk);
	struct bcmgenet_priv *priv = ring->priv;
	txq = netdev_get_tx_queue(priv->dev, ring->queue);
	spin_lock(&ring->lock);
	if (ring->index == DESC_INDEX) {
		intmsk = 1 << ring->index;
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
	free_bds = ring->free_bds;
	spin_unlock(&ring->lock);
	netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
		  ring->index, ring->queue,
		  free_bds, ring->size,
		  ring->prod_index, p_index & DMA_P_INDEX_MASK,
		  ring->c_index, c_index & DMA_C_INDEX_MASK,
		  ring->clean_ptr, ring->write_ptr,
		  ring->cb_ptr, ring->end_ptr);
	for (q = 0; q < priv->hw_params->tx_queues; q++)
		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
	for (q = 0; q < priv->hw_params->tx_queues; q++)
	/* Re-enable TX interrupts if disabled */
	dev->stats.tx_errors++;
	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
	spin_lock(&priv->reg_lock);
	if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
		spin_unlock(&priv->reg_lock);
	spin_unlock(&priv->reg_lock);
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
		bcmgenet_set_mdf_addr(priv, ha->addr, &i);
		bcmgenet_set_mdf_addr(priv, ha->addr, &i);
	reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
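
/* Worked example for the GENMASK() line above (MAX_MDF_FILTER is 17 in
 * this driver; treat the constant as an assumption): with nfilter == 3,
 * GENMASK(16, 14) == 0x1c000, i.e. only the three most significant
 * filter-enable bits are set, matching MDF entries being filled from the
 * top down.
 */
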
		return -EBUSY;
	eth_hw_addr_set(dev, addr->sa_data);
	for (q = 0; q < priv->hw_params->tx_queues; q++) {
		tx_ring = &priv->tx_rings[q];
		tx_bytes += tx_ring->bytes;
		tx_packets += tx_ring->packets;
	tx_ring = &priv->tx_rings[DESC_INDEX];
	tx_bytes += tx_ring->bytes;
	tx_packets += tx_ring->packets;
	for (q = 0; q < priv->hw_params->rx_queues; q++) {
		rx_ring = &priv->rx_rings[q];
		rx_bytes += rx_ring->bytes;
		rx_packets += rx_ring->packets;
		rx_errors += rx_ring->errors;
		rx_dropped += rx_ring->dropped;
	rx_ring = &priv->rx_rings[DESC_INDEX];
	rx_bytes += rx_ring->bytes;
	rx_packets += rx_ring->packets;
	rx_errors += rx_ring->errors;
	rx_dropped += rx_ring->dropped;
	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_errors = rx_errors;
	dev->stats.rx_missed_errors = rx_errors;
	dev->stats.rx_dropped = rx_dropped;
	return &dev->stats;
	if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) ||
	    priv->phy_interface != PHY_INTERFACE_MODE_MOCA)
		return -EOPNOTSUPP;
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			major, priv->version);
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
	 * heuristic to check for the new GPHY revision and re-arrange things
		priv->gphy_rev = gphy_rev << 8;
		priv->gphy_rev = gphy_rev;
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bits PA\n");
		priv->version,
		params->tx_queues, params->tx_bds_per_q,
		params->rx_queues, params->rx_bds_per_q,
		params->bp_in_en_shift, params->bp_in_mask,
		params->hfb_filter_cnt, params->qtag_mask,
		params->tbuf_offset, params->hfb_offset,
		params->hfb_reg_offset,
		params->rdma_offset, params->tdma_offset,
		params->words_per_bd);
	{ .compatible = "brcm,genet-v1", .data = &v1_plat_data },
	{ .compatible = "brcm,genet-v2", .data = &v2_plat_data },
	{ .compatible = "brcm,genet-v3", .data = &v3_plat_data },
	{ .compatible = "brcm,genet-v4", .data = &v4_plat_data },
	{ .compatible = "brcm,genet-v5", .data = &v5_plat_data },
	{ .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
	{ .compatible = "brcm,bcm7712-genet-v5", .data = &bcm7712_plat_data },
	struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
	int err = -EIO;
		dev_err(&pdev->dev, "can't allocate net device\n");
		return -ENOMEM;
	priv->irq0 = platform_get_irq(pdev, 0);
	if (priv->irq0 < 0) {
		err = priv->irq0;
	priv->irq1 = platform_get_irq(pdev, 1);
	if (priv->irq1 < 0) {
		err = priv->irq1;
	priv->wol_irq = platform_get_irq_optional(pdev, 2);
	if (priv->wol_irq == -EPROBE_DEFER) {
		err = priv->wol_irq;
	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
	spin_lock_init(&priv->reg_lock);
	spin_lock_init(&priv->lock);
	priv->autoneg_pause = 1;
	priv->tx_pause = 1;
	priv->rx_pause = 1;
	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->watchdog_timeo = 2 * HZ;
	dev->ethtool_ops = &bcmgenet_ethtool_ops;
	dev->netdev_ops = &bcmgenet_netdev_ops;
	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
	dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	priv->wol_irq_disabled = true;
	if (priv->wol_irq > 0) {
		err = devm_request_irq(&pdev->dev, priv->wol_irq,
				       bcmgenet_wol_isr, 0, dev->name, priv);
			device_set_wakeup_capable(&pdev->dev, 1);
	dev->needed_headroom += 64;
	priv->dev = dev;
	priv->pdev = pdev;
	pdata = device_get_match_data(&pdev->dev);
		priv->version = pdata->version;
		priv->dma_max_burst_length = pdata->dma_max_burst_length;
		priv->ephy_16nm = pdata->ephy_16nm;
		priv->version = pd->genet_version;
		priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
	priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk)) {
		dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
		err = PTR_ERR(priv->clk);
	err = clk_prepare_enable(priv->clk);
	err = -EIO;
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	init_waitqueue_head(&priv->wq);
	priv->rx_buf_len = RX_BUF_LENGTH;
	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
	priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol)) {
		dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
		err = PTR_ERR(priv->clk_wol);
	priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
	if (IS_ERR(priv->clk_eee)) {
		dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
		err = PTR_ERR(priv->clk_eee);
	if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
	if (pd && !IS_ERR_OR_NULL(pd->mac_address))
		eth_hw_addr_set(dev, pd->mac_address);
	if (device_get_ethdev_address(&pdev->dev, dev))
		if (has_acpi_companion(&pdev->dev)) {
	if (!is_valid_ether_addr(dev->dev_addr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
	for (i = 0; i < priv->hw_params->rx_queues; i++)
		priv->rx_rings[i].rx_max_coalesced_frames = 1;
	priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;
	clk_disable_unprepare(priv->clk);
	clk_disable_unprepare(priv->clk);
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);
	ret = clk_prepare_enable(priv->clk);
	if (device_may_wakeup(d) && priv->wolopts) {
		/* Account for Wake-on-LAN events and clear those events
		pm_wakeup_event(&priv->pdev->dev, 0);
	/* From WOL-enabled suspend, switch to regular clock */
	if (device_may_wakeup(d) && priv->wolopts)
	if (priv->internal_phy)
	phy_init_hw(dev->phydev);
	genphy_config_aneg(dev->phydev);
	bcmgenet_mii_config(priv->dev, false);
	bcmgenet_set_features(dev, dev->features);
	bcmgenet_set_hw_addr(priv, dev->dev_addr);
	list_for_each_entry(rule, &priv->rxnfc_list, list)
		if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
	/* Always enable ring 16 - descriptor ring */
	phy_resume(dev->phydev);
	if (priv->internal_phy)
	clk_disable_unprepare(priv->clk);
	phy_suspend(dev->phydev);
	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
	if (device_may_wakeup(d) && priv->wolopts)
	else if (priv->internal_phy)
	clk_disable_unprepare(priv->clk);
MODULE_SOFTDEP("pre: mdio-bcm-unimac");