Lines Matching "synclko-disable"

1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2017-2019 Microchip Technology Inc.
14 #include <linux/platform_data/microchip-ksz.h>
1167 * port map is NOT continuous. The per-port register
1587 if (chip->chip_id == prod_num) in ksz_lookup_info()
1598 dt_chip_data = of_device_get_match_data(dev->dev); in ksz_check_device_id()
1601 if (dt_chip_data->chip_id != dev->chip_id) { in ksz_check_device_id()
1602 dev_err(dev->dev, in ksz_check_device_id()
1604 dt_chip_data->dev_name, dev->info->dev_name); in ksz_check_device_id()
1605 return -ENODEV; in ksz_check_device_id()
1614 struct ksz_device *dev = ds->priv; in ksz_phylink_get_caps()
1616 if (dev->info->supports_mii[port]) in ksz_phylink_get_caps()
1617 __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); in ksz_phylink_get_caps()
1619 if (dev->info->supports_rmii[port]) in ksz_phylink_get_caps()
1621 config->supported_interfaces); in ksz_phylink_get_caps()
1623 if (dev->info->supports_rgmii[port]) in ksz_phylink_get_caps()
1624 phy_interface_set_rgmii(config->supported_interfaces); in ksz_phylink_get_caps()
1626 if (dev->info->internal_phy[port]) { in ksz_phylink_get_caps()
1628 config->supported_interfaces); in ksz_phylink_get_caps()
1630 * phy-mode property is absent in ksz_phylink_get_caps()
1633 config->supported_interfaces); in ksz_phylink_get_caps()
1636 if (dev->dev_ops->get_caps) in ksz_phylink_get_caps()
1637 dev->dev_ops->get_caps(dev, port, config); in ksz_phylink_get_caps()
1647 mib = &dev->ports[port].mib; in ksz_r_mib_stats64()
1648 stats = &mib->stats64; in ksz_r_mib_stats64()
1649 pstats = &mib->pause_stats; in ksz_r_mib_stats64()
1650 raw = (struct ksz_stats_raw *)mib->counters; in ksz_r_mib_stats64()
1652 spin_lock(&mib->stats64_lock); in ksz_r_mib_stats64()
1654 stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast + in ksz_r_mib_stats64()
1655 raw->rx_pause; in ksz_r_mib_stats64()
1656 stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast + in ksz_r_mib_stats64()
1657 raw->tx_pause; in ksz_r_mib_stats64()
1662 stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN; in ksz_r_mib_stats64()
1663 stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN; in ksz_r_mib_stats64()
1665 stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments + in ksz_r_mib_stats64()
1666 raw->rx_oversize; in ksz_r_mib_stats64()
1668 stats->rx_crc_errors = raw->rx_crc_err; in ksz_r_mib_stats64()
1669 stats->rx_frame_errors = raw->rx_align_err; in ksz_r_mib_stats64()
1670 stats->rx_dropped = raw->rx_discards; in ksz_r_mib_stats64()
1671 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + in ksz_r_mib_stats64()
1672 stats->rx_frame_errors + stats->rx_dropped; in ksz_r_mib_stats64()
1674 stats->tx_window_errors = raw->tx_late_col; in ksz_r_mib_stats64()
1675 stats->tx_fifo_errors = raw->tx_discards; in ksz_r_mib_stats64()
1676 stats->tx_aborted_errors = raw->tx_exc_col; in ksz_r_mib_stats64()
1677 stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors + in ksz_r_mib_stats64()
1678 stats->tx_aborted_errors; in ksz_r_mib_stats64()
1680 stats->multicast = raw->rx_mcast; in ksz_r_mib_stats64()
1681 stats->collisions = raw->tx_total_col; in ksz_r_mib_stats64()
1683 pstats->tx_pause_frames = raw->tx_pause; in ksz_r_mib_stats64()
1684 pstats->rx_pause_frames = raw->rx_pause; in ksz_r_mib_stats64()
1686 spin_unlock(&mib->stats64_lock); in ksz_r_mib_stats64()
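The ETH_FCS_LEN adjustment above suggests the hardware byte counters include the 4-byte frame check sequence, which rtnl_link_stats64 does not count. As a worked example, 1000 received frames totalling 1,000,000 bytes on the wire would be reported as rx_bytes = 1,000,000 - 1000 * 4 = 996,000.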
1696 mib = &dev->ports[port].mib; in ksz88xx_r_mib_stats64()
1697 stats = &mib->stats64; in ksz88xx_r_mib_stats64()
1698 pstats = &mib->pause_stats; in ksz88xx_r_mib_stats64()
1699 raw = (struct ksz88xx_stats_raw *)mib->counters; in ksz88xx_r_mib_stats64()
1701 spin_lock(&mib->stats64_lock); in ksz88xx_r_mib_stats64()
1703 stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast + in ksz88xx_r_mib_stats64()
1704 raw->rx_pause; in ksz88xx_r_mib_stats64()
1705 stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast + in ksz88xx_r_mib_stats64()
1706 raw->tx_pause; in ksz88xx_r_mib_stats64()
1711 stats->rx_bytes = raw->rx + raw->rx_hi - stats->rx_packets * ETH_FCS_LEN; in ksz88xx_r_mib_stats64()
1712 stats->tx_bytes = raw->tx + raw->tx_hi - stats->tx_packets * ETH_FCS_LEN; in ksz88xx_r_mib_stats64()
1714 stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments + in ksz88xx_r_mib_stats64()
1715 raw->rx_oversize; in ksz88xx_r_mib_stats64()
1717 stats->rx_crc_errors = raw->rx_crc_err; in ksz88xx_r_mib_stats64()
1718 stats->rx_frame_errors = raw->rx_align_err; in ksz88xx_r_mib_stats64()
1719 stats->rx_dropped = raw->rx_discards; in ksz88xx_r_mib_stats64()
1720 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + in ksz88xx_r_mib_stats64()
1721 stats->rx_frame_errors + stats->rx_dropped; in ksz88xx_r_mib_stats64()
1723 stats->tx_window_errors = raw->tx_late_col; in ksz88xx_r_mib_stats64()
1724 stats->tx_fifo_errors = raw->tx_discards; in ksz88xx_r_mib_stats64()
1725 stats->tx_aborted_errors = raw->tx_exc_col; in ksz88xx_r_mib_stats64()
1726 stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors + in ksz88xx_r_mib_stats64()
1727 stats->tx_aborted_errors; in ksz88xx_r_mib_stats64()
1729 stats->multicast = raw->rx_mcast; in ksz88xx_r_mib_stats64()
1730 stats->collisions = raw->tx_total_col; in ksz88xx_r_mib_stats64()
1732 pstats->tx_pause_frames = raw->tx_pause; in ksz88xx_r_mib_stats64()
1733 pstats->rx_pause_frames = raw->rx_pause; in ksz88xx_r_mib_stats64()
1735 spin_unlock(&mib->stats64_lock); in ksz88xx_r_mib_stats64()
1741 struct ksz_device *dev = ds->priv; in ksz_get_stats64()
1744 mib = &dev->ports[port].mib; in ksz_get_stats64()
1746 spin_lock(&mib->stats64_lock); in ksz_get_stats64()
1747 memcpy(s, &mib->stats64, sizeof(*s)); in ksz_get_stats64()
1748 spin_unlock(&mib->stats64_lock); in ksz_get_stats64()
1754 struct ksz_device *dev = ds->priv; in ksz_get_pause_stats()
1757 mib = &dev->ports[port].mib; in ksz_get_pause_stats()
1759 spin_lock(&mib->stats64_lock); in ksz_get_pause_stats()
1760 memcpy(pause_stats, &mib->pause_stats, sizeof(*pause_stats)); in ksz_get_pause_stats()
1761 spin_unlock(&mib->stats64_lock); in ksz_get_pause_stats()
1767 struct ksz_device *dev = ds->priv; in ksz_get_strings()
1773 for (i = 0; i < dev->info->mib_cnt; i++) { in ksz_get_strings()
1775 dev->info->mib_names[i].string, ETH_GSTRING_LEN); in ksz_get_strings()
1781 struct ksz_port *p = &dev->ports[port]; in ksz_update_port_member()
1782 struct dsa_switch *ds = dev->ds; in ksz_update_port_member()
1793 for (i = 0; i < ds->num_ports; i++) { in ksz_update_port_member()
1795 struct ksz_port *other_p = &dev->ports[i]; in ksz_update_port_member()
1804 if (other_p->stp_state != BR_STATE_FORWARDING) in ksz_update_port_member()
1807 if (p->stp_state == BR_STATE_FORWARDING) { in ksz_update_port_member()
1813 for (j = 0; j < ds->num_ports; j++) { in ksz_update_port_member()
1823 third_p = &dev->ports[j]; in ksz_update_port_member()
1824 if (third_p->stp_state != BR_STATE_FORWARDING) in ksz_update_port_member()
1831 dev->dev_ops->cfg_port_member(dev, i, val | cpu_port); in ksz_update_port_member()
1834 dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port); in ksz_update_port_member()
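Reading the fragments of ksz_update_port_member() together, the forwarding rule appears to be: two user ports may appear in each other's membership mask only when both are in BR_STATE_FORWARDING and belong to the same bridge, while the upstream CPU port is always OR'ed in. A minimal sketch of that predicate, with ksz_ports_may_forward() as a hypothetical helper name (the bridge-membership test is elided from the listing and assumed here):

	/* Illustrative only, not part of the driver: the membership rule
	 * condensed into one predicate. dsa_port_bridge_same() is assumed
	 * to be the bridge test hidden in the elided lines above.
	 */
	static bool ksz_ports_may_forward(struct dsa_switch *ds, int a, int b)
	{
		struct ksz_device *dev = ds->priv;

		return dsa_is_user_port(ds, a) && dsa_is_user_port(ds, b) &&
		       dev->ports[a].stp_state == BR_STATE_FORWARDING &&
		       dev->ports[b].stp_state == BR_STATE_FORWARDING &&
		       dsa_port_bridge_same(dsa_to_port(ds, a), dsa_to_port(ds, b));
	}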
1839 struct ksz_device *dev = bus->priv; in ksz_sw_mdio_read()
1843 ret = dev->dev_ops->r_phy(dev, addr, regnum, &val); in ksz_sw_mdio_read()
1853 struct ksz_device *dev = bus->priv; in ksz_sw_mdio_write()
1855 return dev->dev_ops->w_phy(dev, addr, regnum, val); in ksz_sw_mdio_write()
1860 struct dsa_switch *ds = dev->ds; in ksz_irq_phy_setup()
1866 if (BIT(phy) & ds->phys_mii_mask) { in ksz_irq_phy_setup()
1867 irq = irq_find_mapping(dev->ports[phy].pirq.domain, in ksz_irq_phy_setup()
1873 ds->slave_mii_bus->irq[phy] = irq; in ksz_irq_phy_setup()
1878 while (phy--) in ksz_irq_phy_setup()
1879 if (BIT(phy) & ds->phys_mii_mask) in ksz_irq_phy_setup()
1880 irq_dispose_mapping(ds->slave_mii_bus->irq[phy]); in ksz_irq_phy_setup()
1887 struct dsa_switch *ds = dev->ds; in ksz_irq_phy_free()
1891 if (BIT(phy) & ds->phys_mii_mask) in ksz_irq_phy_free()
1892 irq_dispose_mapping(ds->slave_mii_bus->irq[phy]); in ksz_irq_phy_free()
1897 struct dsa_switch *ds = dev->ds; in ksz_mdio_register()
1902 mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio"); in ksz_mdio_register()
1906 bus = devm_mdiobus_alloc(ds->dev); in ksz_mdio_register()
1909 return -ENOMEM; in ksz_mdio_register()
1912 bus->priv = dev; in ksz_mdio_register()
1913 bus->read = ksz_sw_mdio_read; in ksz_mdio_register()
1914 bus->write = ksz_sw_mdio_write; in ksz_mdio_register()
1915 bus->name = "ksz slave smi"; in ksz_mdio_register()
1916 snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index); in ksz_mdio_register()
1917 bus->parent = ds->dev; in ksz_mdio_register()
1918 bus->phy_mask = ~ds->phys_mii_mask; in ksz_mdio_register()
1920 ds->slave_mii_bus = bus; in ksz_mdio_register()
1922 if (dev->irq > 0) { in ksz_mdio_register()
1930 ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np); in ksz_mdio_register()
1932 dev_err(ds->dev, "unable to register MDIO bus %s\n", in ksz_mdio_register()
1933 bus->id); in ksz_mdio_register()
1934 if (dev->irq > 0) in ksz_mdio_register()
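Note that bus->phy_mask = ~ds->phys_mii_mask above marks every address that does not host a switch PHY as excluded from the bus scan: for example, with ds->phys_mii_mask = 0x1f (internal PHYs at addresses 0 to 4), phy_mask becomes 0xffffffe0 and mdiobus probing skips all remaining addresses.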
1947 kirq->masked |= BIT(d->hwirq); in ksz_irq_mask()
1954 kirq->masked &= ~BIT(d->hwirq); in ksz_irq_unmask()
1961 mutex_lock(&kirq->dev->lock_irq); in ksz_irq_bus_lock()
1967 struct ksz_device *dev = kirq->dev; in ksz_irq_bus_sync_unlock()
1970 ret = ksz_write8(dev, kirq->reg_mask, kirq->masked); in ksz_irq_bus_sync_unlock()
1972 dev_err(dev->dev, "failed to change IRQ mask\n"); in ksz_irq_bus_sync_unlock()
1974 mutex_unlock(&dev->lock_irq); in ksz_irq_bus_sync_unlock()
1978 .name = "ksz-irq",
1988 irq_set_chip_data(irq, d->host_data); in ksz_irq_domain_map()
2004 free_irq(kirq->irq_num, kirq); in ksz_irq_free()
2006 for (irq = 0; irq < kirq->nirqs; irq++) { in ksz_irq_free()
2007 virq = irq_find_mapping(kirq->domain, irq); in ksz_irq_free()
2011 irq_domain_remove(kirq->domain); in ksz_irq_free()
2024 dev = kirq->dev; in ksz_irq_thread_fn()
2027 ret = ksz_read8(dev, kirq->reg_status, &data); in ksz_irq_thread_fn()
2031 for (n = 0; n < kirq->nirqs; ++n) { in ksz_irq_thread_fn()
2033 sub_irq = irq_find_mapping(kirq->domain, n); in ksz_irq_thread_fn()
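The thread function above reads the block's status byte and resolves the nested mapping for each source; the dispatch itself is elided. A minimal sketch of how such a demultiplexer typically completes, assuming only bits set in the status byte are delivered via handle_nested_irq() (ksz_irq_demux() is a hypothetical name, not a driver symbol; struct ksz_irq is the driver's own type, whose fields appear in the listing):

	/* Sketch only: dispatch each asserted status bit to its nested IRQ. */
	static void ksz_irq_demux(struct ksz_irq *kirq, u8 data)
	{
		unsigned int sub_irq;
		int n;

		for (n = 0; n < kirq->nirqs; n++) {
			if (data & BIT(n)) {
				sub_irq = irq_find_mapping(kirq->domain, n);
				handle_nested_irq(sub_irq);
			}
		}
	}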
2046 kirq->dev = dev; in ksz_irq_common_setup()
2047 kirq->masked = ~0; in ksz_irq_common_setup()
2049 kirq->domain = irq_domain_add_simple(dev->dev->of_node, kirq->nirqs, 0, in ksz_irq_common_setup()
2051 if (!kirq->domain) in ksz_irq_common_setup()
2052 return -ENOMEM; in ksz_irq_common_setup()
2054 for (n = 0; n < kirq->nirqs; n++) in ksz_irq_common_setup()
2055 irq_create_mapping(kirq->domain, n); in ksz_irq_common_setup()
2057 ret = request_threaded_irq(kirq->irq_num, NULL, ksz_irq_thread_fn, in ksz_irq_common_setup()
2058 IRQF_ONESHOT, kirq->name, kirq); in ksz_irq_common_setup()
2072 struct ksz_irq *girq = &dev->girq; in ksz_girq_setup()
2074 girq->nirqs = dev->info->port_cnt; in ksz_girq_setup()
2075 girq->reg_mask = REG_SW_PORT_INT_MASK__1; in ksz_girq_setup()
2076 girq->reg_status = REG_SW_PORT_INT_STATUS__1; in ksz_girq_setup()
2077 snprintf(girq->name, sizeof(girq->name), "global_port_irq"); in ksz_girq_setup()
2079 girq->irq_num = dev->irq; in ksz_girq_setup()
2086 struct ksz_irq *pirq = &dev->ports[p].pirq; in ksz_pirq_setup()
2088 pirq->nirqs = dev->info->port_nirqs; in ksz_pirq_setup()
2089 pirq->reg_mask = dev->dev_ops->get_port_addr(p, REG_PORT_INT_MASK); in ksz_pirq_setup()
2090 pirq->reg_status = dev->dev_ops->get_port_addr(p, REG_PORT_INT_STATUS); in ksz_pirq_setup()
2091 snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p); in ksz_pirq_setup()
2093 pirq->irq_num = irq_find_mapping(dev->girq.domain, p); in ksz_pirq_setup()
2094 if (pirq->irq_num < 0) in ksz_pirq_setup()
2095 return pirq->irq_num; in ksz_pirq_setup()
2102 struct ksz_device *dev = ds->priv; in ksz_setup()
2108 regs = dev->info->regs; in ksz_setup()
2110 dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table), in ksz_setup()
2111 dev->info->num_vlans, GFP_KERNEL); in ksz_setup()
2112 if (!dev->vlan_cache) in ksz_setup()
2113 return -ENOMEM; in ksz_setup()
2115 ret = dev->dev_ops->reset(dev); in ksz_setup()
2117 dev_err(ds->dev, "failed to reset switch\n"); in ksz_setup()
2127 dev->dev_ops->config_cpu_port(ds); in ksz_setup()
2129 dev->dev_ops->enable_stp_addr(dev); in ksz_setup()
2131 ds->num_tx_queues = dev->info->num_tx_queues; in ksz_setup()
2138 ds->configure_vlan_while_not_filtering = false; in ksz_setup()
2140 if (dev->dev_ops->setup) { in ksz_setup()
2141 ret = dev->dev_ops->setup(ds); in ksz_setup()
2151 p = &dev->ports[dev->cpu_port]; in ksz_setup()
2152 p->learning = true; in ksz_setup()
2154 if (dev->irq > 0) { in ksz_setup()
2159 dsa_switch_for_each_user_port(dp, dev->ds) { in ksz_setup()
2160 ret = ksz_pirq_setup(dev, dp->index); in ksz_setup()
2164 ret = ksz_ptp_irq_setup(ds, dp->index); in ksz_setup()
2172 dev_err(dev->dev, "Failed to register PTP clock: %d\n", ret); in ksz_setup()
2178 dev_err(dev->dev, "failed to register the mdio"); in ksz_setup()
2191 if (dev->irq > 0) in ksz_setup()
2192 dsa_switch_for_each_user_port(dp, dev->ds) in ksz_setup()
2193 ksz_ptp_irq_free(ds, dp->index); in ksz_setup()
2195 if (dev->irq > 0) in ksz_setup()
2196 dsa_switch_for_each_user_port(dp, dev->ds) in ksz_setup()
2197 ksz_irq_free(&dev->ports[dp->index].pirq); in ksz_setup()
2199 if (dev->irq > 0) in ksz_setup()
2200 ksz_irq_free(&dev->girq); in ksz_setup()
2207 struct ksz_device *dev = ds->priv; in ksz_teardown()
2212 if (dev->irq > 0) { in ksz_teardown()
2213 dsa_switch_for_each_user_port(dp, dev->ds) { in ksz_teardown()
2214 ksz_ptp_irq_free(ds, dp->index); in ksz_teardown()
2216 ksz_irq_free(&dev->ports[dp->index].pirq); in ksz_teardown()
2219 ksz_irq_free(&dev->girq); in ksz_teardown()
2222 if (dev->dev_ops->teardown) in ksz_teardown()
2223 dev->dev_ops->teardown(ds); in ksz_teardown()
2228 struct ksz_port_mib *mib = &dev->ports[port].mib; in port_r_cnt()
2232 while (mib->cnt_ptr < dev->info->reg_mib_cnt) { in port_r_cnt()
2233 dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr, in port_r_cnt()
2234 &mib->counters[mib->cnt_ptr]); in port_r_cnt()
2235 ++mib->cnt_ptr; in port_r_cnt()
2239 dropped = &mib->counters[dev->info->mib_cnt]; in port_r_cnt()
2242 while (mib->cnt_ptr < dev->info->mib_cnt) { in port_r_cnt()
2243 dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr, in port_r_cnt()
2244 dropped, &mib->counters[mib->cnt_ptr]); in port_r_cnt()
2245 ++mib->cnt_ptr; in port_r_cnt()
2247 mib->cnt_ptr = 0; in port_r_cnt()
2258 for (i = 0; i < dev->info->port_cnt; i++) { in ksz_mib_read_work()
2259 if (dsa_is_unused_port(dev->ds, i)) in ksz_mib_read_work()
2262 p = &dev->ports[i]; in ksz_mib_read_work()
2263 mib = &p->mib; in ksz_mib_read_work()
2264 mutex_lock(&mib->cnt_mutex); in ksz_mib_read_work()
2269 if (!p->read) { in ksz_mib_read_work()
2270 const struct dsa_port *dp = dsa_to_port(dev->ds, i); in ksz_mib_read_work()
2272 if (!netif_carrier_ok(dp->slave)) in ksz_mib_read_work()
2273 mib->cnt_ptr = dev->info->reg_mib_cnt; in ksz_mib_read_work()
2276 p->read = false; in ksz_mib_read_work()
2278 if (dev->dev_ops->r_mib_stat64) in ksz_mib_read_work()
2279 dev->dev_ops->r_mib_stat64(dev, i); in ksz_mib_read_work()
2281 mutex_unlock(&mib->cnt_mutex); in ksz_mib_read_work()
2284 schedule_delayed_work(&dev->mib_read, dev->mib_read_interval); in ksz_mib_read_work()
2291 INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work); in ksz_init_mib_timer()
2293 for (i = 0; i < dev->info->port_cnt; i++) { in ksz_init_mib_timer()
2294 struct ksz_port_mib *mib = &dev->ports[i].mib; in ksz_init_mib_timer()
2296 dev->dev_ops->port_init_cnt(dev, i); in ksz_init_mib_timer()
2298 mib->cnt_ptr = 0; in ksz_init_mib_timer()
2299 memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64)); in ksz_init_mib_timer()
2305 struct ksz_device *dev = ds->priv; in ksz_phy_read16()
2309 ret = dev->dev_ops->r_phy(dev, addr, reg, &val); in ksz_phy_read16()
2318 struct ksz_device *dev = ds->priv; in ksz_phy_write16()
2321 ret = dev->dev_ops->w_phy(dev, addr, reg, val); in ksz_phy_write16()
2330 struct ksz_device *dev = ds->priv; in ksz_get_phy_flags()
2332 switch (dev->chip_id) { in ksz_get_phy_flags()
2335 * Port 1 does not work with LinkMD Cable-Testing. in ksz_get_phy_flags()
2348 * controls. If not disabled, the PHY ports can auto-negotiate in ksz_get_phy_flags()
2361 struct ksz_device *dev = ds->priv; in ksz_mac_link_down()
2362 struct ksz_port *p = &dev->ports[port]; in ksz_mac_link_down()
2365 p->read = true; in ksz_mac_link_down()
2367 if (dev->mib_read_interval) in ksz_mac_link_down()
2368 schedule_delayed_work(&dev->mib_read, 0); in ksz_mac_link_down()
2373 struct ksz_device *dev = ds->priv; in ksz_sset_count()
2378 return dev->info->mib_cnt; in ksz_sset_count()
2385 struct ksz_device *dev = ds->priv; in ksz_get_ethtool_stats()
2388 mib = &dev->ports[port].mib; in ksz_get_ethtool_stats()
2389 mutex_lock(&mib->cnt_mutex); in ksz_get_ethtool_stats()
2392 if (!netif_carrier_ok(dp->slave)) in ksz_get_ethtool_stats()
2393 mib->cnt_ptr = dev->info->reg_mib_cnt; in ksz_get_ethtool_stats()
2395 memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64)); in ksz_get_ethtool_stats()
2396 mutex_unlock(&mib->cnt_mutex); in ksz_get_ethtool_stats()
2421 struct ksz_device *dev = ds->priv; in ksz_port_fast_age()
2423 dev->dev_ops->flush_dyn_mac_table(dev, port); in ksz_port_fast_age()
2428 struct ksz_device *dev = ds->priv; in ksz_set_ageing_time()
2430 if (!dev->dev_ops->set_ageing_time) in ksz_set_ageing_time()
2431 return -EOPNOTSUPP; in ksz_set_ageing_time()
2433 return dev->dev_ops->set_ageing_time(dev, msecs); in ksz_set_ageing_time()
2440 struct ksz_device *dev = ds->priv; in ksz_port_fdb_add()
2442 if (!dev->dev_ops->fdb_add) in ksz_port_fdb_add()
2443 return -EOPNOTSUPP; in ksz_port_fdb_add()
2445 return dev->dev_ops->fdb_add(dev, port, addr, vid, db); in ksz_port_fdb_add()
2452 struct ksz_device *dev = ds->priv; in ksz_port_fdb_del()
2454 if (!dev->dev_ops->fdb_del) in ksz_port_fdb_del()
2455 return -EOPNOTSUPP; in ksz_port_fdb_del()
2457 return dev->dev_ops->fdb_del(dev, port, addr, vid, db); in ksz_port_fdb_del()
2463 struct ksz_device *dev = ds->priv; in ksz_port_fdb_dump()
2465 if (!dev->dev_ops->fdb_dump) in ksz_port_fdb_dump()
2466 return -EOPNOTSUPP; in ksz_port_fdb_dump()
2468 return dev->dev_ops->fdb_dump(dev, port, cb, data); in ksz_port_fdb_dump()
2475 struct ksz_device *dev = ds->priv; in ksz_port_mdb_add()
2477 if (!dev->dev_ops->mdb_add) in ksz_port_mdb_add()
2478 return -EOPNOTSUPP; in ksz_port_mdb_add()
2480 return dev->dev_ops->mdb_add(dev, port, mdb, db); in ksz_port_mdb_add()
2487 struct ksz_device *dev = ds->priv; in ksz_port_mdb_del()
2489 if (!dev->dev_ops->mdb_del) in ksz_port_mdb_del()
2490 return -EOPNOTSUPP; in ksz_port_mdb_del()
2492 return dev->dev_ops->mdb_del(dev, port, mdb, db); in ksz_port_mdb_del()
2498 struct ksz_device *dev = ds->priv; in ksz_enable_port()
2504 dev->dev_ops->port_setup(dev, port, false); in ksz_enable_port()
2515 struct ksz_device *dev = ds->priv; in ksz_port_stp_state_set()
2520 regs = dev->info->regs; in ksz_port_stp_state_set()
2525 p = &dev->ports[port]; in ksz_port_stp_state_set()
2536 if (!p->learning) in ksz_port_stp_state_set()
2541 if (!p->learning) in ksz_port_stp_state_set()
2548 dev_err(ds->dev, "invalid STP state: %d\n", state); in ksz_port_stp_state_set()
2554 p->stp_state = state; in ksz_port_stp_state_set()
2564 return -EINVAL; in ksz_port_pre_bridge_flags()
2573 struct ksz_device *dev = ds->priv; in ksz_port_bridge_flags()
2574 struct ksz_port *p = &dev->ports[port]; in ksz_port_bridge_flags()
2577 p->learning = !!(flags.val & BR_LEARNING); in ksz_port_bridge_flags()
2580 ksz_port_stp_state_set(ds, port, p->stp_state); in ksz_port_bridge_flags()
2590 struct ksz_device *dev = ds->priv; in ksz_get_tag_protocol()
2593 if (dev->chip_id == KSZ8795_CHIP_ID || in ksz_get_tag_protocol()
2594 dev->chip_id == KSZ8794_CHIP_ID || in ksz_get_tag_protocol()
2595 dev->chip_id == KSZ8765_CHIP_ID) in ksz_get_tag_protocol()
2598 if (dev->chip_id == KSZ8830_CHIP_ID || in ksz_get_tag_protocol()
2599 dev->chip_id == KSZ8563_CHIP_ID || in ksz_get_tag_protocol()
2600 dev->chip_id == KSZ9893_CHIP_ID || in ksz_get_tag_protocol()
2601 dev->chip_id == KSZ9563_CHIP_ID) in ksz_get_tag_protocol()
2604 if (dev->chip_id == KSZ9477_CHIP_ID || in ksz_get_tag_protocol()
2605 dev->chip_id == KSZ9896_CHIP_ID || in ksz_get_tag_protocol()
2606 dev->chip_id == KSZ9897_CHIP_ID || in ksz_get_tag_protocol()
2607 dev->chip_id == KSZ9567_CHIP_ID) in ksz_get_tag_protocol()
2628 tagger_data->xmit_work_fn = ksz_port_deferred_xmit; in ksz_connect_tag_protocol()
2631 return -EPROTONOSUPPORT; in ksz_connect_tag_protocol()
2638 struct ksz_device *dev = ds->priv; in ksz_port_vlan_filtering()
2640 if (!dev->dev_ops->vlan_filtering) in ksz_port_vlan_filtering()
2641 return -EOPNOTSUPP; in ksz_port_vlan_filtering()
2643 return dev->dev_ops->vlan_filtering(dev, port, flag, extack); in ksz_port_vlan_filtering()
2650 struct ksz_device *dev = ds->priv; in ksz_port_vlan_add()
2652 if (!dev->dev_ops->vlan_add) in ksz_port_vlan_add()
2653 return -EOPNOTSUPP; in ksz_port_vlan_add()
2655 return dev->dev_ops->vlan_add(dev, port, vlan, extack); in ksz_port_vlan_add()
2661 struct ksz_device *dev = ds->priv; in ksz_port_vlan_del()
2663 if (!dev->dev_ops->vlan_del) in ksz_port_vlan_del()
2664 return -EOPNOTSUPP; in ksz_port_vlan_del()
2666 return dev->dev_ops->vlan_del(dev, port, vlan); in ksz_port_vlan_del()
2673 struct ksz_device *dev = ds->priv; in ksz_port_mirror_add()
2675 if (!dev->dev_ops->mirror_add) in ksz_port_mirror_add()
2676 return -EOPNOTSUPP; in ksz_port_mirror_add()
2678 return dev->dev_ops->mirror_add(dev, port, mirror, ingress, extack); in ksz_port_mirror_add()
2684 struct ksz_device *dev = ds->priv; in ksz_port_mirror_del()
2686 if (dev->dev_ops->mirror_del) in ksz_port_mirror_del()
2687 dev->dev_ops->mirror_del(dev, port, mirror); in ksz_port_mirror_del()
2692 struct ksz_device *dev = ds->priv; in ksz_change_mtu()
2694 if (!dev->dev_ops->change_mtu) in ksz_change_mtu()
2695 return -EOPNOTSUPP; in ksz_change_mtu()
2697 return dev->dev_ops->change_mtu(dev, port, mtu); in ksz_change_mtu()
2702 struct ksz_device *dev = ds->priv; in ksz_max_mtu()
2704 switch (dev->chip_id) { in ksz_max_mtu()
2708 return KSZ8795_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN; in ksz_max_mtu()
2710 return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN; in ksz_max_mtu()
2723 return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN; in ksz_max_mtu()
2726 return -EOPNOTSUPP; in ksz_max_mtu()
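With VLAN_ETH_HLEN being 18 bytes (Ethernet header plus one VLAN tag) and ETH_FCS_LEN 4 bytes, each branch reports the chip's maximum frame size minus 22 bytes of framing overhead as the usable MTU.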
2731 struct ksz_device *dev = ds->priv; in ksz_validate_eee()
2733 if (!dev->info->internal_phy[port]) in ksz_validate_eee()
2734 return -EOPNOTSUPP; in ksz_validate_eee()
2736 switch (dev->chip_id) { in ksz_validate_eee()
2747 return -EOPNOTSUPP; in ksz_validate_eee()
2760 e->tx_lpi_enabled = true; in ksz_get_mac_eee()
2765 e->tx_lpi_timer = 0; in ksz_get_mac_eee()
2773 struct ksz_device *dev = ds->priv; in ksz_set_mac_eee()
2780 if (!e->tx_lpi_enabled) { in ksz_set_mac_eee()
2781 dev_err(dev->dev, "Disabling EEE Tx LPI is not supported\n"); in ksz_set_mac_eee()
2782 return -EINVAL; in ksz_set_mac_eee()
2785 if (e->tx_lpi_timer) { in ksz_set_mac_eee()
2786 dev_err(dev->dev, "Setting EEE Tx LPI timer is not supported\n"); in ksz_set_mac_eee()
2787 return -EINVAL; in ksz_set_mac_eee()
2796 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_set_xmii()
2797 struct ksz_port *p = &dev->ports[port]; in ksz_set_xmii()
2798 const u16 *regs = dev->info->regs; in ksz_set_xmii()
2821 /* On KSZ9893, disable RGMII in-band status support */ in ksz_set_xmii()
2822 if (dev->chip_id == KSZ9893_CHIP_ID || in ksz_set_xmii()
2823 dev->chip_id == KSZ8563_CHIP_ID || in ksz_set_xmii()
2824 dev->chip_id == KSZ9563_CHIP_ID) in ksz_set_xmii()
2828 dev_err(dev->dev, "Unsupported interface '%s' for port %d\n", in ksz_set_xmii()
2833 if (p->rgmii_tx_val) in ksz_set_xmii()
2836 if (p->rgmii_rx_val) in ksz_set_xmii()
2845 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_get_xmii()
2846 const u16 *regs = dev->info->regs; in ksz_get_xmii()
2880 struct ksz_device *dev = ds->priv; in ksz_phylink_mac_config()
2886 if (dev->info->internal_phy[port]) in ksz_phylink_mac_config()
2890 dev_err(dev->dev, "In-band AN not supported!\n"); in ksz_phylink_mac_config()
2894 ksz_set_xmii(dev, port, state->interface); in ksz_phylink_mac_config()
2896 if (dev->dev_ops->phylink_mac_config) in ksz_phylink_mac_config()
2897 dev->dev_ops->phylink_mac_config(dev, port, mode, state); in ksz_phylink_mac_config()
2899 if (dev->dev_ops->setup_rgmii_delay) in ksz_phylink_mac_config()
2900 dev->dev_ops->setup_rgmii_delay(dev, port); in ksz_phylink_mac_config()
2905 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_get_gbit()
2906 const u16 *regs = dev->info->regs; in ksz_get_gbit()
2923 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_set_gbit()
2924 const u16 *regs = dev->info->regs; in ksz_set_gbit()
2942 const u8 *bitval = dev->info->xmii_ctrl0; in ksz_set_100_10mbit()
2943 const u16 *regs = dev->info->regs; in ksz_set_100_10mbit()
2973 const u8 *bitval = dev->info->xmii_ctrl0; in ksz_duplex_flowctrl()
2974 const u32 *masks = dev->info->masks; in ksz_duplex_flowctrl()
2975 const u16 *regs = dev->info->regs; in ksz_duplex_flowctrl()
3005 p = &dev->ports[port]; in ksz9477_phylink_mac_link_up()
3008 if (dev->info->internal_phy[port]) in ksz9477_phylink_mac_link_up()
3011 p->phydev.speed = speed; in ksz9477_phylink_mac_link_up()
3024 struct ksz_device *dev = ds->priv; in ksz_phylink_mac_link_up()
3026 if (dev->dev_ops->phylink_mac_link_up) in ksz_phylink_mac_link_up()
3027 dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface, in ksz_phylink_mac_link_up()
3052 dev->chip_id = KSZ8795_CHIP_ID; in ksz_switch_detect()
3056 dev->chip_id = KSZ8765_CHIP_ID; in ksz_switch_detect()
3058 dev->chip_id = KSZ8794_CHIP_ID; in ksz_switch_detect()
3060 return -ENODEV; in ksz_switch_detect()
3065 dev->chip_id = KSZ8830_CHIP_ID; in ksz_switch_detect()
3067 return -ENODEV; in ksz_switch_detect()
3074 dev->chip_rev = FIELD_GET(SW_REV_ID_M, id32); in ksz_switch_detect()
3087 dev->chip_id = id32; in ksz_switch_detect()
3096 dev->chip_id = KSZ8563_CHIP_ID; in ksz_switch_detect()
3098 dev->chip_id = KSZ9563_CHIP_ID; in ksz_switch_detect()
3100 dev->chip_id = KSZ9893_CHIP_ID; in ksz_switch_detect()
3104 dev_err(dev->dev, in ksz_switch_detect()
3106 return -ENODEV; in ksz_switch_detect()
3113 * is converted to hexadecimal using the successive multiplication method. On
3124 txrate = idle_slope - send_slope; in cinc_cal()
3127 return -EINVAL; in cinc_cal()
3158 struct ksz_device *dev = ds->priv; in ksz_setup_tc_cbs()
3162 if (!dev->info->tc_cbs_supported) in ksz_setup_tc_cbs()
3163 return -EOPNOTSUPP; in ksz_setup_tc_cbs()
3165 if (qopt->queue > dev->info->num_tx_queues) in ksz_setup_tc_cbs()
3166 return -EINVAL; in ksz_setup_tc_cbs()
3169 ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, qopt->queue); in ksz_setup_tc_cbs()
3173 if (!qopt->enable) in ksz_setup_tc_cbs()
3179 qopt->hicredit); in ksz_setup_tc_cbs()
3185 qopt->locredit); in ksz_setup_tc_cbs()
3190 ret = cinc_cal(qopt->idleslope, qopt->sendslope, &bw); in ksz_setup_tc_cbs()
3194 if (dev->dev_ops->tc_cbs_set_cinc) { in ksz_setup_tc_cbs()
3195 ret = dev->dev_ops->tc_cbs_set_cinc(dev, port, bw); in ksz_setup_tc_cbs()
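For context, the tc CBS parameters satisfy sendslope = idleslope - port_transmit_rate, so txrate = idle_slope - send_slope in cinc_cal() recovers the link rate. Reserving 20 Mbit/s on a 100 Mbit/s port, for instance, gives idleslope = 20000 kbit/s and sendslope = -80000 kbit/s, hence txrate = 20000 - (-80000) = 100000 kbit/s; the elided remainder of cinc_cal() presumably encodes the resulting fraction idle_slope / txrate (0.2 here) into the bandwidth value handed to tc_cbs_set_cinc().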
3211 for (queue = 0; queue < dev->info->num_tx_queues; queue++) { in ksz_disable_egress_rate_limit()
3228 return p->bands - 1 - band; in ksz_ets_band_to_queue()
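As a worked example, with four tx queues (p->bands == 4) band 0 maps to queue 3 and band 3 to queue 0; the index is simply reversed, which reads as tc's first-serviced band landing on the switch's highest-numbered queue (assumed here to be its highest-priority one).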
3277 for (band = 0; band < p->bands; band++) { in ksz_tc_ets_add()
3289 for (tc_prio = 0; tc_prio < ARRAY_SIZE(p->priomap); tc_prio++) { in ksz_tc_ets_add()
3295 queue = ksz_ets_band_to_queue(p, p->priomap[tc_prio]); in ksz_tc_ets_add()
3310 for (queue = 0; queue < dev->info->num_tx_queues; queue++) { in ksz_tc_ets_del()
3317 switch (dev->info->num_tx_queues) { in ksz_tc_ets_del()
3328 return -EINVAL; in ksz_tc_ets_del()
3331 /* Revert the queue mapping for TC-priority to its default setting on in ksz_tc_ets_del()
3352 if (p->bands != dev->info->num_tx_queues) { in ksz_tc_ets_validate()
3353 dev_err(dev->dev, "Not supported amount of bands. It should be %d\n", in ksz_tc_ets_validate()
3354 dev->info->num_tx_queues); in ksz_tc_ets_validate()
3355 return -EOPNOTSUPP; in ksz_tc_ets_validate()
3358 for (band = 0; band < p->bands; ++band) { in ksz_tc_ets_validate()
3371 if (p->quanta[band]) { in ksz_tc_ets_validate()
3372 dev_err(dev->dev, "Quanta/weights configuration is not supported.\n"); in ksz_tc_ets_validate()
3373 return -EOPNOTSUPP; in ksz_tc_ets_validate()
3383 struct ksz_device *dev = ds->priv; in ksz_tc_setup_qdisc_ets()
3386 if (!dev->info->tc_ets_supported) in ksz_tc_setup_qdisc_ets()
3387 return -EOPNOTSUPP; in ksz_tc_setup_qdisc_ets()
3389 if (qopt->parent != TC_H_ROOT) { in ksz_tc_setup_qdisc_ets()
3390 dev_err(dev->dev, "Parent should be \"root\"\n"); in ksz_tc_setup_qdisc_ets()
3391 return -EOPNOTSUPP; in ksz_tc_setup_qdisc_ets()
3394 switch (qopt->command) { in ksz_tc_setup_qdisc_ets()
3396 ret = ksz_tc_ets_validate(dev, port, &qopt->replace_params); in ksz_tc_setup_qdisc_ets()
3400 return ksz_tc_ets_add(dev, port, &qopt->replace_params); in ksz_tc_setup_qdisc_ets()
3405 return -EOPNOTSUPP; in ksz_tc_setup_qdisc_ets()
3408 return -EOPNOTSUPP; in ksz_tc_setup_qdisc_ets()
3420 return -EOPNOTSUPP; in ksz_setup_tc()
3480 ds->dev = base; in ksz_switch_alloc()
3481 ds->num_ports = DSA_MAX_PORTS; in ksz_switch_alloc()
3482 ds->ops = &ksz_switch_ops; in ksz_switch_alloc()
3488 ds->priv = swdev; in ksz_switch_alloc()
3489 swdev->dev = base; in ksz_switch_alloc()
3491 swdev->ds = ds; in ksz_switch_alloc()
3492 swdev->priv = priv; in ksz_switch_alloc()
3501 phy_interface_t phy_mode = dev->ports[port_num].interface; in ksz_parse_rgmii_delay()
3502 int rx_delay = -1, tx_delay = -1; in ksz_parse_rgmii_delay()
3507 of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay); in ksz_parse_rgmii_delay()
3508 of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay); in ksz_parse_rgmii_delay()
3510 if (rx_delay == -1 && tx_delay == -1) { in ksz_parse_rgmii_delay()
3511 dev_warn(dev->dev, in ksz_parse_rgmii_delay()
3512 "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, " in ksz_parse_rgmii_delay()
3513 "please update device tree to specify \"rx-internal-delay-ps\" and " in ksz_parse_rgmii_delay()
3514 "\"tx-internal-delay-ps\"", in ksz_parse_rgmii_delay()
3531 dev->ports[port_num].rgmii_rx_val = rx_delay; in ksz_parse_rgmii_delay()
3532 dev->ports[port_num].rgmii_tx_val = tx_delay; in ksz_parse_rgmii_delay()
3544 if (dev->pdata) in ksz_switch_register()
3545 dev->chip_id = dev->pdata->chip_id; in ksz_switch_register()
3547 dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset", in ksz_switch_register()
3549 if (IS_ERR(dev->reset_gpio)) in ksz_switch_register()
3550 return PTR_ERR(dev->reset_gpio); in ksz_switch_register()
3552 if (dev->reset_gpio) { in ksz_switch_register()
3553 gpiod_set_value_cansleep(dev->reset_gpio, 1); in ksz_switch_register()
3555 gpiod_set_value_cansleep(dev->reset_gpio, 0); in ksz_switch_register()
3559 mutex_init(&dev->dev_mutex); in ksz_switch_register()
3560 mutex_init(&dev->regmap_mutex); in ksz_switch_register()
3561 mutex_init(&dev->alu_mutex); in ksz_switch_register()
3562 mutex_init(&dev->vlan_mutex); in ksz_switch_register()
3568 info = ksz_lookup_info(dev->chip_id); in ksz_switch_register()
3570 return -ENODEV; in ksz_switch_register()
3573 dev->info = info; in ksz_switch_register()
3575 dev_info(dev->dev, "found switch: %s, rev %i\n", in ksz_switch_register()
3576 dev->info->dev_name, dev->chip_rev); in ksz_switch_register()
3582 dev->dev_ops = dev->info->ops; in ksz_switch_register()
3584 ret = dev->dev_ops->init(dev); in ksz_switch_register()
3588 dev->ports = devm_kzalloc(dev->dev, in ksz_switch_register()
3589 dev->info->port_cnt * sizeof(struct ksz_port), in ksz_switch_register()
3591 if (!dev->ports) in ksz_switch_register()
3592 return -ENOMEM; in ksz_switch_register()
3594 for (i = 0; i < dev->info->port_cnt; i++) { in ksz_switch_register()
3595 spin_lock_init(&dev->ports[i].mib.stats64_lock); in ksz_switch_register()
3596 mutex_init(&dev->ports[i].mib.cnt_mutex); in ksz_switch_register()
3597 dev->ports[i].mib.counters = in ksz_switch_register()
3598 devm_kzalloc(dev->dev, in ksz_switch_register()
3599 sizeof(u64) * (dev->info->mib_cnt + 1), in ksz_switch_register()
3601 if (!dev->ports[i].mib.counters) in ksz_switch_register()
3602 return -ENOMEM; in ksz_switch_register()
3604 dev->ports[i].ksz_dev = dev; in ksz_switch_register()
3605 dev->ports[i].num = i; in ksz_switch_register()
3609 dev->ds->num_ports = dev->info->port_cnt; in ksz_switch_register()
3614 for (port_num = 0; port_num < dev->info->port_cnt; ++port_num) in ksz_switch_register()
3615 dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA; in ksz_switch_register()
3616 if (dev->dev->of_node) { in ksz_switch_register()
3617 ret = of_get_phy_mode(dev->dev->of_node, &interface); in ksz_switch_register()
3619 dev->compat_interface = interface; in ksz_switch_register()
3620 ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports"); in ksz_switch_register()
3622 ports = of_get_child_by_name(dev->dev->of_node, "ports"); in ksz_switch_register()
3628 if (!(dev->port_mask & BIT(port_num))) { in ksz_switch_register()
3631 return -EINVAL; in ksz_switch_register()
3634 &dev->ports[port_num].interface); in ksz_switch_register()
3640 dev->synclko_125 = of_property_read_bool(dev->dev->of_node, in ksz_switch_register()
3641 "microchip,synclko-125"); in ksz_switch_register()
3642 dev->synclko_disable = of_property_read_bool(dev->dev->of_node, in ksz_switch_register()
3643 "microchip,synclko-disable"); in ksz_switch_register()
3644 if (dev->synclko_125 && dev->synclko_disable) { in ksz_switch_register()
3645 dev_err(dev->dev, "inconsistent synclko settings\n"); in ksz_switch_register()
3646 return -EINVAL; in ksz_switch_register()
3650 ret = dsa_register_switch(dev->ds); in ksz_switch_register()
3652 dev->dev_ops->exit(dev); in ksz_switch_register()
3657 dev->mib_read_interval = msecs_to_jiffies(5000); in ksz_switch_register()
3660 schedule_delayed_work(&dev->mib_read, 0); in ksz_switch_register()
3669 if (dev->mib_read_interval) { in ksz_switch_remove()
3670 dev->mib_read_interval = 0; in ksz_switch_remove()
3671 cancel_delayed_work_sync(&dev->mib_read); in ksz_switch_remove()
3674 dev->dev_ops->exit(dev); in ksz_switch_remove()
3675 dsa_unregister_switch(dev->ds); in ksz_switch_remove()
3677 if (dev->reset_gpio) in ksz_switch_remove()
3678 gpiod_set_value_cansleep(dev->reset_gpio, 1); in ksz_switch_remove()