Lines matching refs:dev (Microchip KSZ DSA switch core, ksz_common.c)

221 static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
1600 static int ksz_check_device_id(struct ksz_device *dev) in ksz_check_device_id() argument
1604 dt_chip_data = of_device_get_match_data(dev->dev); in ksz_check_device_id()
1607 if (dt_chip_data->chip_id != dev->chip_id) { in ksz_check_device_id()
1608 dev_err(dev->dev, in ksz_check_device_id()
1610 dt_chip_data->dev_name, dev->info->dev_name); in ksz_check_device_id()
1620 struct ksz_device *dev = ds->priv; in ksz_phylink_get_caps() local
1622 if (dev->info->supports_mii[port]) in ksz_phylink_get_caps()
1625 if (dev->info->supports_rmii[port]) in ksz_phylink_get_caps()
1629 if (dev->info->supports_rgmii[port]) in ksz_phylink_get_caps()
1632 if (dev->info->internal_phy[port]) { in ksz_phylink_get_caps()
1642 if (dev->dev_ops->get_caps) in ksz_phylink_get_caps()
1643 dev->dev_ops->get_caps(dev, port, config); in ksz_phylink_get_caps()
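
The ksz_phylink_get_caps() lines above translate the per-port capability flags in dev->info into phylink's supported_interfaces bitmap and then let the chip-specific get_caps hook refine the result. A minimal sketch of that translation, assuming the usual phylink helpers (mac_capabilities handling omitted; the function name is made up for illustration):

static void example_phylink_get_caps(struct ksz_device *dev, int port,
				     struct phylink_config *config)
{
	if (dev->info->supports_mii[port])
		__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);

	if (dev->info->supports_rmii[port])
		__set_bit(PHY_INTERFACE_MODE_RMII, config->supported_interfaces);

	if (dev->info->supports_rgmii[port])
		/* covers all four RGMII delay variants */
		phy_interface_set_rgmii(config->supported_interfaces);

	if (dev->info->internal_phy[port])
		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
			  config->supported_interfaces);

	/* chip-specific additions stay behind the dev_ops hook */
	if (dev->dev_ops->get_caps)
		dev->dev_ops->get_caps(dev, port, config);
}
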
1646 void ksz_r_mib_stats64(struct ksz_device *dev, int port) in ksz_r_mib_stats64() argument
1653 mib = &dev->ports[port].mib; in ksz_r_mib_stats64()
1695 void ksz88xx_r_mib_stats64(struct ksz_device *dev, int port) in ksz88xx_r_mib_stats64() argument
1702 mib = &dev->ports[port].mib; in ksz88xx_r_mib_stats64()
1747 struct ksz_device *dev = ds->priv; in ksz_get_stats64() local
1750 mib = &dev->ports[port].mib; in ksz_get_stats64()
1760 struct ksz_device *dev = ds->priv; in ksz_get_pause_stats() local
1763 mib = &dev->ports[port].mib; in ksz_get_pause_stats()
1773 struct ksz_device *dev = ds->priv; in ksz_get_strings() local
1779 for (i = 0; i < dev->info->mib_cnt; i++) { in ksz_get_strings()
1781 dev->info->mib_names[i].string, ETH_GSTRING_LEN); in ksz_get_strings()
1785 static void ksz_update_port_member(struct ksz_device *dev, int port) in ksz_update_port_member() argument
1787 struct ksz_port *p = &dev->ports[port]; in ksz_update_port_member()
1788 struct dsa_switch *ds = dev->ds; in ksz_update_port_member()
1801 struct ksz_port *other_p = &dev->ports[i]; in ksz_update_port_member()
1829 third_p = &dev->ports[j]; in ksz_update_port_member()
1837 dev->dev_ops->cfg_port_member(dev, i, val | cpu_port); in ksz_update_port_member()
1840 dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port); in ksz_update_port_member()
1845 struct ksz_device *dev = bus->priv; in ksz_sw_mdio_read() local
1849 ret = dev->dev_ops->r_phy(dev, addr, regnum, &val); in ksz_sw_mdio_read()
1859 struct ksz_device *dev = bus->priv; in ksz_sw_mdio_write() local
1861 return dev->dev_ops->w_phy(dev, addr, regnum, val); in ksz_sw_mdio_write()
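
The ksz_sw_mdio_read()/ksz_sw_mdio_write() lines show the MDIO bus wrappers: the struct mii_bus carries the ksz_device in bus->priv and defers the actual PHY register access to the chip-specific r_phy()/w_phy() ops. A sketch of that shape (function names are illustrative):

static int example_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct ksz_device *dev = bus->priv;
	u16 val;
	int ret;

	ret = dev->dev_ops->r_phy(dev, addr, regnum, &val);
	if (ret < 0)
		return ret;

	return val;
}

static int example_mdio_write(struct mii_bus *bus, int addr, int regnum,
			      u16 val)
{
	struct ksz_device *dev = bus->priv;

	return dev->dev_ops->w_phy(dev, addr, regnum, val);
}
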
1864 static int ksz_irq_phy_setup(struct ksz_device *dev) in ksz_irq_phy_setup() argument
1866 struct dsa_switch *ds = dev->ds; in ksz_irq_phy_setup()
1873 irq = irq_find_mapping(dev->ports[phy].pirq.domain, in ksz_irq_phy_setup()
1891 static void ksz_irq_phy_free(struct ksz_device *dev) in ksz_irq_phy_free() argument
1893 struct dsa_switch *ds = dev->ds; in ksz_irq_phy_free()
1901 static int ksz_mdio_register(struct ksz_device *dev) in ksz_mdio_register() argument
1903 struct dsa_switch *ds = dev->ds; in ksz_mdio_register()
1908 mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio"); in ksz_mdio_register()
1912 bus = devm_mdiobus_alloc(ds->dev); in ksz_mdio_register()
1918 bus->priv = dev; in ksz_mdio_register()
1923 bus->parent = ds->dev; in ksz_mdio_register()
1928 if (dev->irq > 0) { in ksz_mdio_register()
1929 ret = ksz_irq_phy_setup(dev); in ksz_mdio_register()
1936 ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np); in ksz_mdio_register()
1938 dev_err(ds->dev, "unable to register MDIO bus %s\n", in ksz_mdio_register()
1940 if (dev->irq > 0) in ksz_mdio_register()
1941 ksz_irq_phy_free(dev); in ksz_mdio_register()
1967 mutex_lock(&kirq->dev->lock_irq); in ksz_irq_bus_lock()
1973 struct ksz_device *dev = kirq->dev; in ksz_irq_bus_sync_unlock() local
1976 ret = ksz_write8(dev, kirq->reg_mask, kirq->masked); in ksz_irq_bus_sync_unlock()
1978 dev_err(dev->dev, "failed to change IRQ mask\n"); in ksz_irq_bus_sync_unlock()
1980 mutex_unlock(&dev->lock_irq); in ksz_irq_bus_sync_unlock()
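
ksz_irq_bus_lock()/ksz_irq_bus_sync_unlock() follow the standard slow-bus irq_chip pattern: mask changes are only cached while the bus lock is held and are flushed to the switch register in a single write when the lock is released. A sketch under that assumption (the mask callback shown here is not part of the listing and is illustrative):

static void example_irq_mask(struct irq_data *d)
{
	struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);

	kirq->masked |= BIT(d->hwirq);		/* cache only, no bus access */
}

static void example_irq_bus_lock(struct irq_data *d)
{
	struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);

	mutex_lock(&kirq->dev->lock_irq);	/* serialize the register update */
}

static void example_irq_bus_sync_unlock(struct irq_data *d)
{
	struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
	struct ksz_device *dev = kirq->dev;

	/* flush the cached mask to the switch over SPI/I2C, then unlock */
	if (ksz_write8(dev, kirq->reg_mask, kirq->masked))
		dev_err(dev->dev, "failed to change IRQ mask\n");

	mutex_unlock(&dev->lock_irq);
}
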
2024 struct ksz_device *dev; in ksz_irq_thread_fn() local
2030 dev = kirq->dev; in ksz_irq_thread_fn()
2033 ret = ksz_read8(dev, kirq->reg_status, &data); in ksz_irq_thread_fn()
2048 static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq) in ksz_irq_common_setup() argument
2052 kirq->dev = dev; in ksz_irq_common_setup()
2055 kirq->domain = irq_domain_add_simple(dev->dev->of_node, kirq->nirqs, 0, in ksz_irq_common_setup()
2076 static int ksz_girq_setup(struct ksz_device *dev) in ksz_girq_setup() argument
2078 struct ksz_irq *girq = &dev->girq; in ksz_girq_setup()
2080 girq->nirqs = dev->info->port_cnt; in ksz_girq_setup()
2085 girq->irq_num = dev->irq; in ksz_girq_setup()
2087 return ksz_irq_common_setup(dev, girq); in ksz_girq_setup()
2090 static int ksz_pirq_setup(struct ksz_device *dev, u8 p) in ksz_pirq_setup() argument
2092 struct ksz_irq *pirq = &dev->ports[p].pirq; in ksz_pirq_setup()
2094 pirq->nirqs = dev->info->port_nirqs; in ksz_pirq_setup()
2095 pirq->reg_mask = dev->dev_ops->get_port_addr(p, REG_PORT_INT_MASK); in ksz_pirq_setup()
2096 pirq->reg_status = dev->dev_ops->get_port_addr(p, REG_PORT_INT_STATUS); in ksz_pirq_setup()
2099 pirq->irq_num = irq_find_mapping(dev->girq.domain, p); in ksz_pirq_setup()
2103 return ksz_irq_common_setup(dev, pirq); in ksz_pirq_setup()
2108 struct ksz_device *dev = ds->priv; in ksz_setup() local
2114 regs = dev->info->regs; in ksz_setup()
2116 dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table), in ksz_setup()
2117 dev->info->num_vlans, GFP_KERNEL); in ksz_setup()
2118 if (!dev->vlan_cache) in ksz_setup()
2121 ret = dev->dev_ops->reset(dev); in ksz_setup()
2123 dev_err(ds->dev, "failed to reset switch\n"); in ksz_setup()
2128 regmap_update_bits(ksz_regmap_16(dev), regs[S_BROADCAST_CTRL], in ksz_setup()
2133 dev->dev_ops->config_cpu_port(ds); in ksz_setup()
2135 dev->dev_ops->enable_stp_addr(dev); in ksz_setup()
2137 ds->num_tx_queues = dev->info->num_tx_queues; in ksz_setup()
2139 regmap_update_bits(ksz_regmap_8(dev), regs[S_MULTICAST_CTRL], in ksz_setup()
2142 ksz_init_mib_timer(dev); in ksz_setup()
2146 if (dev->dev_ops->setup) { in ksz_setup()
2147 ret = dev->dev_ops->setup(ds); in ksz_setup()
2157 p = &dev->ports[dev->cpu_port]; in ksz_setup()
2160 if (dev->irq > 0) { in ksz_setup()
2161 ret = ksz_girq_setup(dev); in ksz_setup()
2165 dsa_switch_for_each_user_port(dp, dev->ds) { in ksz_setup()
2166 ret = ksz_pirq_setup(dev, dp->index); in ksz_setup()
2178 dev_err(dev->dev, "Failed to register PTP clock: %d\n", ret); in ksz_setup()
2182 ret = ksz_mdio_register(dev); in ksz_setup()
2184 dev_err(dev->dev, "failed to register the mdio"); in ksz_setup()
2189 regmap_update_bits(ksz_regmap_8(dev), regs[S_START_CTRL], in ksz_setup()
2197 if (dev->irq > 0) in ksz_setup()
2198 dsa_switch_for_each_user_port(dp, dev->ds) in ksz_setup()
2201 if (dev->irq > 0) in ksz_setup()
2202 dsa_switch_for_each_user_port(dp, dev->ds) in ksz_setup()
2203 ksz_irq_free(&dev->ports[dp->index].pirq); in ksz_setup()
2205 if (dev->irq > 0) in ksz_setup()
2206 ksz_irq_free(&dev->girq); in ksz_setup()
2213 struct ksz_device *dev = ds->priv; in ksz_teardown() local
2218 if (dev->irq > 0) { in ksz_teardown()
2219 dsa_switch_for_each_user_port(dp, dev->ds) { in ksz_teardown()
2222 ksz_irq_free(&dev->ports[dp->index].pirq); in ksz_teardown()
2225 ksz_irq_free(&dev->girq); in ksz_teardown()
2228 if (dev->dev_ops->teardown) in ksz_teardown()
2229 dev->dev_ops->teardown(ds); in ksz_teardown()
2232 static void port_r_cnt(struct ksz_device *dev, int port) in port_r_cnt() argument
2234 struct ksz_port_mib *mib = &dev->ports[port].mib; in port_r_cnt()
2238 while (mib->cnt_ptr < dev->info->reg_mib_cnt) { in port_r_cnt()
2239 dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr, in port_r_cnt()
2245 dropped = &mib->counters[dev->info->mib_cnt]; in port_r_cnt()
2248 while (mib->cnt_ptr < dev->info->mib_cnt) { in port_r_cnt()
2249 dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr, in port_r_cnt()
2258 struct ksz_device *dev = container_of(work, struct ksz_device, in ksz_mib_read_work() local
2264 for (i = 0; i < dev->info->port_cnt; i++) { in ksz_mib_read_work()
2265 if (dsa_is_unused_port(dev->ds, i)) in ksz_mib_read_work()
2268 p = &dev->ports[i]; in ksz_mib_read_work()
2276 const struct dsa_port *dp = dsa_to_port(dev->ds, i); in ksz_mib_read_work()
2279 mib->cnt_ptr = dev->info->reg_mib_cnt; in ksz_mib_read_work()
2281 port_r_cnt(dev, i); in ksz_mib_read_work()
2284 if (dev->dev_ops->r_mib_stat64) in ksz_mib_read_work()
2285 dev->dev_ops->r_mib_stat64(dev, i); in ksz_mib_read_work()
2290 schedule_delayed_work(&dev->mib_read, dev->mib_read_interval); in ksz_mib_read_work()
2293 void ksz_init_mib_timer(struct ksz_device *dev) in ksz_init_mib_timer() argument
2297 INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work); in ksz_init_mib_timer()
2299 for (i = 0; i < dev->info->port_cnt; i++) { in ksz_init_mib_timer()
2300 struct ksz_port_mib *mib = &dev->ports[i].mib; in ksz_init_mib_timer()
2302 dev->dev_ops->port_init_cnt(dev, i); in ksz_init_mib_timer()
2305 memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64)); in ksz_init_mib_timer()
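
ksz_init_mib_timer() and ksz_mib_read_work() implement the periodic MIB poll as a self-rearming delayed work item: ksz_switch_register() arms it once, the work function re-arms itself with dev->mib_read_interval, and ksz_switch_remove() clears the interval and cancels it. A reduced sketch of the work function (per-port locking and link-state handling elided):

static void example_mib_read_work(struct work_struct *work)
{
	struct ksz_device *dev = container_of(work, struct ksz_device,
					      mib_read.work);
	int i;

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (dsa_is_unused_port(dev->ds, i))
			continue;

		port_r_cnt(dev, i);		/* accumulate hardware counters */

		if (dev->dev_ops->r_mib_stat64)
			dev->dev_ops->r_mib_stat64(dev, i);
	}

	/* re-arm; remove clears mib_read_interval and cancels the work */
	schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
}
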
2311 struct ksz_device *dev = ds->priv; in ksz_phy_read16() local
2315 ret = dev->dev_ops->r_phy(dev, addr, reg, &val); in ksz_phy_read16()
2324 struct ksz_device *dev = ds->priv; in ksz_phy_write16() local
2327 ret = dev->dev_ops->w_phy(dev, addr, reg, val); in ksz_phy_write16()
2336 struct ksz_device *dev = ds->priv; in ksz_get_phy_flags() local
2338 switch (dev->chip_id) { in ksz_get_phy_flags()
2367 struct ksz_device *dev = ds->priv; in ksz_mac_link_down() local
2368 struct ksz_port *p = &dev->ports[port]; in ksz_mac_link_down()
2373 if (dev->mib_read_interval) in ksz_mac_link_down()
2374 schedule_delayed_work(&dev->mib_read, 0); in ksz_mac_link_down()
2379 struct ksz_device *dev = ds->priv; in ksz_sset_count() local
2384 return dev->info->mib_cnt; in ksz_sset_count()
2391 struct ksz_device *dev = ds->priv; in ksz_get_ethtool_stats() local
2394 mib = &dev->ports[port].mib; in ksz_get_ethtool_stats()
2399 mib->cnt_ptr = dev->info->reg_mib_cnt; in ksz_get_ethtool_stats()
2400 port_r_cnt(dev, port); in ksz_get_ethtool_stats()
2401 memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64)); in ksz_get_ethtool_stats()
2427 struct ksz_device *dev = ds->priv; in ksz_port_fast_age() local
2429 dev->dev_ops->flush_dyn_mac_table(dev, port); in ksz_port_fast_age()
2434 struct ksz_device *dev = ds->priv; in ksz_set_ageing_time() local
2436 if (!dev->dev_ops->set_ageing_time) in ksz_set_ageing_time()
2439 return dev->dev_ops->set_ageing_time(dev, msecs); in ksz_set_ageing_time()
2446 struct ksz_device *dev = ds->priv; in ksz_port_fdb_add() local
2448 if (!dev->dev_ops->fdb_add) in ksz_port_fdb_add()
2451 return dev->dev_ops->fdb_add(dev, port, addr, vid, db); in ksz_port_fdb_add()
2458 struct ksz_device *dev = ds->priv; in ksz_port_fdb_del() local
2460 if (!dev->dev_ops->fdb_del) in ksz_port_fdb_del()
2463 return dev->dev_ops->fdb_del(dev, port, addr, vid, db); in ksz_port_fdb_del()
2469 struct ksz_device *dev = ds->priv; in ksz_port_fdb_dump() local
2471 if (!dev->dev_ops->fdb_dump) in ksz_port_fdb_dump()
2474 return dev->dev_ops->fdb_dump(dev, port, cb, data); in ksz_port_fdb_dump()
2481 struct ksz_device *dev = ds->priv; in ksz_port_mdb_add() local
2483 if (!dev->dev_ops->mdb_add) in ksz_port_mdb_add()
2486 return dev->dev_ops->mdb_add(dev, port, mdb, db); in ksz_port_mdb_add()
2493 struct ksz_device *dev = ds->priv; in ksz_port_mdb_del() local
2495 if (!dev->dev_ops->mdb_del) in ksz_port_mdb_del()
2498 return dev->dev_ops->mdb_del(dev, port, mdb, db); in ksz_port_mdb_del()
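
The FDB/MDB handlers (and the VLAN and mirror handlers further down) all share one dispatch shape: return -EOPNOTSUPP when the chip-specific op is absent, otherwise forward to dev_ops. A representative sketch:

static int example_port_fdb_add(struct dsa_switch *ds, int port,
				const unsigned char *addr, u16 vid,
				struct dsa_db db)
{
	struct ksz_device *dev = ds->priv;

	/* chips without an ALU op simply refuse the offload */
	if (!dev->dev_ops->fdb_add)
		return -EOPNOTSUPP;

	return dev->dev_ops->fdb_add(dev, port, addr, vid, db);
}
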
2504 struct ksz_device *dev = ds->priv; in ksz_enable_port() local
2510 dev->dev_ops->port_setup(dev, port, false); in ksz_enable_port()
2521 struct ksz_device *dev = ds->priv; in ksz_port_stp_state_set() local
2526 regs = dev->info->regs; in ksz_port_stp_state_set()
2528 ksz_pread8(dev, port, regs[P_STP_CTRL], &data); in ksz_port_stp_state_set()
2531 p = &dev->ports[port]; in ksz_port_stp_state_set()
2554 dev_err(ds->dev, "invalid STP state: %d\n", state); in ksz_port_stp_state_set()
2558 ksz_pwrite8(dev, port, regs[P_STP_CTRL], data); in ksz_port_stp_state_set()
2562 ksz_update_port_member(dev, port); in ksz_port_stp_state_set()
2579 struct ksz_device *dev = ds->priv; in ksz_port_bridge_flags() local
2580 struct ksz_port *p = &dev->ports[port]; in ksz_port_bridge_flags()
2596 struct ksz_device *dev = ds->priv; in ksz_get_tag_protocol() local
2599 if (dev->chip_id == KSZ8795_CHIP_ID || in ksz_get_tag_protocol()
2600 dev->chip_id == KSZ8794_CHIP_ID || in ksz_get_tag_protocol()
2601 dev->chip_id == KSZ8765_CHIP_ID) in ksz_get_tag_protocol()
2604 if (dev->chip_id == KSZ8830_CHIP_ID || in ksz_get_tag_protocol()
2605 dev->chip_id == KSZ8563_CHIP_ID || in ksz_get_tag_protocol()
2606 dev->chip_id == KSZ9893_CHIP_ID || in ksz_get_tag_protocol()
2607 dev->chip_id == KSZ9563_CHIP_ID) in ksz_get_tag_protocol()
2610 if (dev->chip_id == KSZ9477_CHIP_ID || in ksz_get_tag_protocol()
2611 dev->chip_id == KSZ9896_CHIP_ID || in ksz_get_tag_protocol()
2612 dev->chip_id == KSZ9897_CHIP_ID || in ksz_get_tag_protocol()
2613 dev->chip_id == KSZ9567_CHIP_ID) in ksz_get_tag_protocol()
2616 if (is_lan937x(dev)) in ksz_get_tag_protocol()
2644 struct ksz_device *dev = ds->priv; in ksz_port_vlan_filtering() local
2646 if (!dev->dev_ops->vlan_filtering) in ksz_port_vlan_filtering()
2649 return dev->dev_ops->vlan_filtering(dev, port, flag, extack); in ksz_port_vlan_filtering()
2656 struct ksz_device *dev = ds->priv; in ksz_port_vlan_add() local
2658 if (!dev->dev_ops->vlan_add) in ksz_port_vlan_add()
2661 return dev->dev_ops->vlan_add(dev, port, vlan, extack); in ksz_port_vlan_add()
2667 struct ksz_device *dev = ds->priv; in ksz_port_vlan_del() local
2669 if (!dev->dev_ops->vlan_del) in ksz_port_vlan_del()
2672 return dev->dev_ops->vlan_del(dev, port, vlan); in ksz_port_vlan_del()
2679 struct ksz_device *dev = ds->priv; in ksz_port_mirror_add() local
2681 if (!dev->dev_ops->mirror_add) in ksz_port_mirror_add()
2684 return dev->dev_ops->mirror_add(dev, port, mirror, ingress, extack); in ksz_port_mirror_add()
2690 struct ksz_device *dev = ds->priv; in ksz_port_mirror_del() local
2692 if (dev->dev_ops->mirror_del) in ksz_port_mirror_del()
2693 dev->dev_ops->mirror_del(dev, port, mirror); in ksz_port_mirror_del()
2698 struct ksz_device *dev = ds->priv; in ksz_change_mtu() local
2700 if (!dev->dev_ops->change_mtu) in ksz_change_mtu()
2703 return dev->dev_ops->change_mtu(dev, port, mtu); in ksz_change_mtu()
2708 struct ksz_device *dev = ds->priv; in ksz_max_mtu() local
2710 switch (dev->chip_id) { in ksz_max_mtu()
2737 struct ksz_device *dev = ds->priv; in ksz_validate_eee() local
2739 if (!dev->info->internal_phy[port]) in ksz_validate_eee()
2742 switch (dev->chip_id) { in ksz_validate_eee()
2779 struct ksz_device *dev = ds->priv; in ksz_set_mac_eee() local
2787 dev_err(dev->dev, "Disabling EEE Tx LPI is not supported\n"); in ksz_set_mac_eee()
2792 dev_err(dev->dev, "Setting EEE Tx LPI timer is not supported\n"); in ksz_set_mac_eee()
2799 static void ksz_set_xmii(struct ksz_device *dev, int port, in ksz_set_xmii() argument
2802 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_set_xmii()
2803 struct ksz_port *p = &dev->ports[port]; in ksz_set_xmii()
2804 const u16 *regs = dev->info->regs; in ksz_set_xmii()
2807 ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8); in ksz_set_xmii()
2828 if (dev->chip_id == KSZ9893_CHIP_ID || in ksz_set_xmii()
2829 dev->chip_id == KSZ8563_CHIP_ID || in ksz_set_xmii()
2830 dev->chip_id == KSZ9563_CHIP_ID) in ksz_set_xmii()
2834 dev_err(dev->dev, "Unsupported interface '%s' for port %d\n", in ksz_set_xmii()
2846 ksz_pwrite8(dev, port, regs[P_XMII_CTRL_1], data8); in ksz_set_xmii()
2849 phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit) in ksz_get_xmii() argument
2851 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_get_xmii()
2852 const u16 *regs = dev->info->regs; in ksz_get_xmii()
2857 ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8); in ksz_get_xmii()
2886 struct ksz_device *dev = ds->priv; in ksz_phylink_mac_config() local
2888 if (ksz_is_ksz88x3(dev)) in ksz_phylink_mac_config()
2892 if (dev->info->internal_phy[port]) in ksz_phylink_mac_config()
2896 dev_err(dev->dev, "In-band AN not supported!\n"); in ksz_phylink_mac_config()
2900 ksz_set_xmii(dev, port, state->interface); in ksz_phylink_mac_config()
2902 if (dev->dev_ops->phylink_mac_config) in ksz_phylink_mac_config()
2903 dev->dev_ops->phylink_mac_config(dev, port, mode, state); in ksz_phylink_mac_config()
2905 if (dev->dev_ops->setup_rgmii_delay) in ksz_phylink_mac_config()
2906 dev->dev_ops->setup_rgmii_delay(dev, port); in ksz_phylink_mac_config()
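
ksz_phylink_mac_config() only programs the MAC-side interface for ports that actually have an xMII control block: KSZ88x3 parts and internal-PHY ports are skipped, in-band autonegotiation is rejected, and the chip-specific config/RGMII-delay hooks run last. A condensed sketch assembled from the lines above (structure assumed where the listing is truncated):

static void example_mac_config(struct dsa_switch *ds, int port,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct ksz_device *dev = ds->priv;

	if (ksz_is_ksz88x3(dev))
		return;

	if (dev->info->internal_phy[port])
		return;

	if (phylink_autoneg_inband(mode)) {
		dev_err(dev->dev, "In-band AN not supported!\n");
		return;
	}

	ksz_set_xmii(dev, port, state->interface);

	if (dev->dev_ops->phylink_mac_config)
		dev->dev_ops->phylink_mac_config(dev, port, mode, state);

	if (dev->dev_ops->setup_rgmii_delay)
		dev->dev_ops->setup_rgmii_delay(dev, port);
}
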
2909 bool ksz_get_gbit(struct ksz_device *dev, int port) in ksz_get_gbit() argument
2911 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_get_gbit()
2912 const u16 *regs = dev->info->regs; in ksz_get_gbit()
2917 ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8); in ksz_get_gbit()
2927 static void ksz_set_gbit(struct ksz_device *dev, int port, bool gbit) in ksz_set_gbit() argument
2929 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_set_gbit()
2930 const u16 *regs = dev->info->regs; in ksz_set_gbit()
2933 ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8); in ksz_set_gbit()
2943 ksz_pwrite8(dev, port, regs[P_XMII_CTRL_1], data8); in ksz_set_gbit()
2946 static void ksz_set_100_10mbit(struct ksz_device *dev, int port, int speed) in ksz_set_100_10mbit() argument
2948 const u8 *bitval = dev->info->xmii_ctrl0; in ksz_set_100_10mbit()
2949 const u16 *regs = dev->info->regs; in ksz_set_100_10mbit()
2952 ksz_pread8(dev, port, regs[P_XMII_CTRL_0], &data8); in ksz_set_100_10mbit()
2962 ksz_pwrite8(dev, port, regs[P_XMII_CTRL_0], data8); in ksz_set_100_10mbit()
2965 static void ksz_port_set_xmii_speed(struct ksz_device *dev, int port, int speed) in ksz_port_set_xmii_speed() argument
2968 ksz_set_gbit(dev, port, true); in ksz_port_set_xmii_speed()
2970 ksz_set_gbit(dev, port, false); in ksz_port_set_xmii_speed()
2973 ksz_set_100_10mbit(dev, port, speed); in ksz_port_set_xmii_speed()
2976 static void ksz_duplex_flowctrl(struct ksz_device *dev, int port, int duplex, in ksz_duplex_flowctrl() argument
2979 const u8 *bitval = dev->info->xmii_ctrl0; in ksz_duplex_flowctrl()
2980 const u32 *masks = dev->info->masks; in ksz_duplex_flowctrl()
2981 const u16 *regs = dev->info->regs; in ksz_duplex_flowctrl()
2999 ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val); in ksz_duplex_flowctrl()
3002 static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port, in ksz9477_phylink_mac_link_up() argument
3011 p = &dev->ports[port]; in ksz9477_phylink_mac_link_up()
3014 if (dev->info->internal_phy[port]) in ksz9477_phylink_mac_link_up()
3019 ksz_port_set_xmii_speed(dev, port, speed); in ksz9477_phylink_mac_link_up()
3021 ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause); in ksz9477_phylink_mac_link_up()
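
ksz9477_phylink_mac_link_up() leaves internal-PHY ports alone (the MAC tracks the PHY in hardware) and, for external interfaces, writes the xMII speed bits before duplex and flow control. A condensed sketch of that ordering (the mode/interface/phydev arguments and the per-port bookkeeping via p are omitted):

static void example_mac_link_up(struct ksz_device *dev, int port,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	if (dev->info->internal_phy[port])
		return;

	ksz_port_set_xmii_speed(dev, port, speed);	/* gbit vs 100/10 first */
	ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause);
}
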
3030 struct ksz_device *dev = ds->priv; in ksz_phylink_mac_link_up() local
3032 if (dev->dev_ops->phylink_mac_link_up) in ksz_phylink_mac_link_up()
3033 dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface, in ksz_phylink_mac_link_up()
3038 static int ksz_switch_detect(struct ksz_device *dev) in ksz_switch_detect() argument
3046 ret = ksz_read16(dev, REG_CHIP_ID0, &id16); in ksz_switch_detect()
3058 dev->chip_id = KSZ8795_CHIP_ID; in ksz_switch_detect()
3060 ksz_read8(dev, KSZ8_PORT_STATUS_0, &val); in ksz_switch_detect()
3062 dev->chip_id = KSZ8765_CHIP_ID; in ksz_switch_detect()
3064 dev->chip_id = KSZ8794_CHIP_ID; in ksz_switch_detect()
3071 dev->chip_id = KSZ8830_CHIP_ID; in ksz_switch_detect()
3076 ret = ksz_read32(dev, REG_CHIP_ID0, &id32); in ksz_switch_detect()
3080 dev->chip_rev = FIELD_GET(SW_REV_ID_M, id32); in ksz_switch_detect()
3093 dev->chip_id = id32; in ksz_switch_detect()
3096 ret = ksz_read8(dev, REG_CHIP_ID4, in ksz_switch_detect()
3102 dev->chip_id = KSZ8563_CHIP_ID; in ksz_switch_detect()
3104 dev->chip_id = KSZ9563_CHIP_ID; in ksz_switch_detect()
3106 dev->chip_id = KSZ9893_CHIP_ID; in ksz_switch_detect()
3110 dev_err(dev->dev, in ksz_switch_detect()
3153 static int ksz_setup_tc_mode(struct ksz_device *dev, int port, u8 scheduler, in ksz_setup_tc_mode() argument
3156 return ksz_pwrite8(dev, port, REG_PORT_MTI_QUEUE_CTRL_0, in ksz_setup_tc_mode()
3164 struct ksz_device *dev = ds->priv; in ksz_setup_tc_cbs() local
3168 if (!dev->info->tc_cbs_supported) in ksz_setup_tc_cbs()
3171 if (qopt->queue > dev->info->num_tx_queues) in ksz_setup_tc_cbs()
3175 ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, qopt->queue); in ksz_setup_tc_cbs()
3180 return ksz_setup_tc_mode(dev, port, MTI_SCHEDULE_WRR, in ksz_setup_tc_cbs()
3184 ret = ksz_pwrite16(dev, port, REG_PORT_MTI_HI_WATER_MARK, in ksz_setup_tc_cbs()
3190 ret = ksz_pwrite16(dev, port, REG_PORT_MTI_LO_WATER_MARK, in ksz_setup_tc_cbs()
3200 if (dev->dev_ops->tc_cbs_set_cinc) { in ksz_setup_tc_cbs()
3201 ret = dev->dev_ops->tc_cbs_set_cinc(dev, port, bw); in ksz_setup_tc_cbs()
3206 return ksz_setup_tc_mode(dev, port, MTI_SCHEDULE_STRICT_PRIO, in ksz_setup_tc_cbs()
3210 static int ksz_disable_egress_rate_limit(struct ksz_device *dev, int port) in ksz_disable_egress_rate_limit() argument
3217 for (queue = 0; queue < dev->info->num_tx_queues; queue++) { in ksz_disable_egress_rate_limit()
3218 ret = ksz_pwrite8(dev, port, KSZ9477_REG_PORT_OUT_RATE_0 + queue, in ksz_disable_egress_rate_limit()
3237 static int ksz_queue_set_strict(struct ksz_device *dev, int port, int queue) in ksz_queue_set_strict() argument
3241 ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, queue); in ksz_queue_set_strict()
3245 return ksz_setup_tc_mode(dev, port, MTI_SCHEDULE_STRICT_PRIO, in ksz_queue_set_strict()
3249 static int ksz_queue_set_wrr(struct ksz_device *dev, int port, int queue, in ksz_queue_set_wrr() argument
3254 ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, queue); in ksz_queue_set_wrr()
3258 ret = ksz_setup_tc_mode(dev, port, MTI_SCHEDULE_WRR, in ksz_queue_set_wrr()
3263 return ksz_pwrite8(dev, port, KSZ9477_PORT_MTI_QUEUE_CTRL_1, weight); in ksz_queue_set_wrr()
3266 static int ksz_tc_ets_add(struct ksz_device *dev, int port, in ksz_tc_ets_add() argument
3276 ret = ksz_disable_egress_rate_limit(dev, port); in ksz_tc_ets_add()
3286 ret = ksz_queue_set_strict(dev, port, queue); in ksz_tc_ets_add()
3305 return ksz_pwrite32(dev, port, KSZ9477_PORT_MRI_TC_MAP__4, queue_map); in ksz_tc_ets_add()
3308 static int ksz_tc_ets_del(struct ksz_device *dev, int port) in ksz_tc_ets_del() argument
3316 for (queue = 0; queue < dev->info->num_tx_queues; queue++) { in ksz_tc_ets_del()
3317 ret = ksz_queue_set_wrr(dev, port, queue, in ksz_tc_ets_del()
3323 switch (dev->info->num_tx_queues) { in ksz_tc_ets_del()
3347 return ksz_pwrite32(dev, port, KSZ9477_PORT_MRI_TC_MAP__4, queue_map); in ksz_tc_ets_del()
3350 static int ksz_tc_ets_validate(struct ksz_device *dev, int port, in ksz_tc_ets_validate() argument
3358 if (p->bands != dev->info->num_tx_queues) { in ksz_tc_ets_validate()
3359 dev_err(dev->dev, "Not supported amount of bands. It should be %d\n", in ksz_tc_ets_validate()
3360 dev->info->num_tx_queues); in ksz_tc_ets_validate()
3378 dev_err(dev->dev, "Quanta/weights configuration is not supported.\n"); in ksz_tc_ets_validate()
3389 struct ksz_device *dev = ds->priv; in ksz_tc_setup_qdisc_ets() local
3392 if (!dev->info->tc_ets_supported) in ksz_tc_setup_qdisc_ets()
3396 dev_err(dev->dev, "Parent should be \"root\"\n"); in ksz_tc_setup_qdisc_ets()
3402 ret = ksz_tc_ets_validate(dev, port, &qopt->replace_params); in ksz_tc_setup_qdisc_ets()
3406 return ksz_tc_ets_add(dev, port, &qopt->replace_params); in ksz_tc_setup_qdisc_ets()
3408 return ksz_tc_ets_del(dev, port); in ksz_tc_setup_qdisc_ets()
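
ksz_tc_setup_qdisc_ets() gates the ETS offload on dev->info->tc_ets_supported, insists on a root-attached qdisc, and then dispatches on the offload command. A reduced sketch of that dispatch, covering only the two commands visible above (field and enum names follow the generic tc_ets_qopt_offload interface and are assumed here):

static int example_setup_qdisc_ets(struct dsa_switch *ds, int port,
				   struct tc_ets_qopt_offload *qopt)
{
	struct ksz_device *dev = ds->priv;
	int ret;

	if (!dev->info->tc_ets_supported)
		return -EOPNOTSUPP;

	if (qopt->parent != TC_H_ROOT) {
		dev_err(dev->dev, "Parent should be \"root\"\n");
		return -EOPNOTSUPP;
	}

	switch (qopt->command) {
	case TC_ETS_REPLACE:
		ret = ksz_tc_ets_validate(dev, port, &qopt->replace_params);
		if (ret)
			return ret;

		return ksz_tc_ets_add(dev, port, &qopt->replace_params);
	case TC_ETS_DESTROY:
		return ksz_tc_ets_del(dev, port);
	default:
		return -EOPNOTSUPP;
	}
}
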
3486 ds->dev = base; in ksz_switch_alloc()
3495 swdev->dev = base; in ksz_switch_alloc()
3504 static void ksz_parse_rgmii_delay(struct ksz_device *dev, int port_num, in ksz_parse_rgmii_delay() argument
3507 phy_interface_t phy_mode = dev->ports[port_num].interface; in ksz_parse_rgmii_delay()
3517 dev_warn(dev->dev, in ksz_parse_rgmii_delay()
3537 dev->ports[port_num].rgmii_rx_val = rx_delay; in ksz_parse_rgmii_delay()
3538 dev->ports[port_num].rgmii_tx_val = tx_delay; in ksz_parse_rgmii_delay()
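
ksz_parse_rgmii_delay() only applies to RGMII phy-modes and caches the per-port delays in rgmii_rx_val/rgmii_tx_val for the chip-specific setup_rgmii_delay hook to program later. A simplified sketch, assuming the delays come from the common rx-internal-delay-ps / tx-internal-delay-ps port properties (property names and the default/validation handling are assumptions of this sketch):

static void example_parse_rgmii_delay(struct ksz_device *dev, int port_num,
				      struct device_node *port_dn)
{
	phy_interface_t phy_mode = dev->ports[port_num].interface;
	u32 rx_delay = 0, tx_delay = 0;

	if (!phy_interface_mode_is_rgmii(phy_mode))
		return;

	/* delays in picoseconds, taken from the port's device tree node */
	of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
	of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);

	dev->ports[port_num].rgmii_rx_val = rx_delay;
	dev->ports[port_num].rgmii_tx_val = tx_delay;
}
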
3541 int ksz_switch_register(struct ksz_device *dev) in ksz_switch_register() argument
3550 if (dev->pdata) in ksz_switch_register()
3551 dev->chip_id = dev->pdata->chip_id; in ksz_switch_register()
3553 dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset", in ksz_switch_register()
3555 if (IS_ERR(dev->reset_gpio)) in ksz_switch_register()
3556 return PTR_ERR(dev->reset_gpio); in ksz_switch_register()
3558 if (dev->reset_gpio) { in ksz_switch_register()
3559 gpiod_set_value_cansleep(dev->reset_gpio, 1); in ksz_switch_register()
3561 gpiod_set_value_cansleep(dev->reset_gpio, 0); in ksz_switch_register()
3565 mutex_init(&dev->dev_mutex); in ksz_switch_register()
3566 mutex_init(&dev->regmap_mutex); in ksz_switch_register()
3567 mutex_init(&dev->alu_mutex); in ksz_switch_register()
3568 mutex_init(&dev->vlan_mutex); in ksz_switch_register()
3570 ret = ksz_switch_detect(dev); in ksz_switch_register()
3574 info = ksz_lookup_info(dev->chip_id); in ksz_switch_register()
3579 dev->info = info; in ksz_switch_register()
3581 dev_info(dev->dev, "found switch: %s, rev %i\n", in ksz_switch_register()
3582 dev->info->dev_name, dev->chip_rev); in ksz_switch_register()
3584 ret = ksz_check_device_id(dev); in ksz_switch_register()
3588 dev->dev_ops = dev->info->ops; in ksz_switch_register()
3590 ret = dev->dev_ops->init(dev); in ksz_switch_register()
3594 dev->ports = devm_kzalloc(dev->dev, in ksz_switch_register()
3595 dev->info->port_cnt * sizeof(struct ksz_port), in ksz_switch_register()
3597 if (!dev->ports) in ksz_switch_register()
3600 for (i = 0; i < dev->info->port_cnt; i++) { in ksz_switch_register()
3601 spin_lock_init(&dev->ports[i].mib.stats64_lock); in ksz_switch_register()
3602 mutex_init(&dev->ports[i].mib.cnt_mutex); in ksz_switch_register()
3603 dev->ports[i].mib.counters = in ksz_switch_register()
3604 devm_kzalloc(dev->dev, in ksz_switch_register()
3605 sizeof(u64) * (dev->info->mib_cnt + 1), in ksz_switch_register()
3607 if (!dev->ports[i].mib.counters) in ksz_switch_register()
3610 dev->ports[i].ksz_dev = dev; in ksz_switch_register()
3611 dev->ports[i].num = i; in ksz_switch_register()
3615 dev->ds->num_ports = dev->info->port_cnt; in ksz_switch_register()
3620 for (port_num = 0; port_num < dev->info->port_cnt; ++port_num) in ksz_switch_register()
3621 dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA; in ksz_switch_register()
3622 if (dev->dev->of_node) { in ksz_switch_register()
3623 ret = of_get_phy_mode(dev->dev->of_node, &interface); in ksz_switch_register()
3625 dev->compat_interface = interface; in ksz_switch_register()
3626 ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports"); in ksz_switch_register()
3628 ports = of_get_child_by_name(dev->dev->of_node, "ports"); in ksz_switch_register()
3634 if (!(dev->port_mask & BIT(port_num))) { in ksz_switch_register()
3640 &dev->ports[port_num].interface); in ksz_switch_register()
3642 ksz_parse_rgmii_delay(dev, port_num, port); in ksz_switch_register()
3646 dev->synclko_125 = of_property_read_bool(dev->dev->of_node, in ksz_switch_register()
3648 dev->synclko_disable = of_property_read_bool(dev->dev->of_node, in ksz_switch_register()
3650 if (dev->synclko_125 && dev->synclko_disable) { in ksz_switch_register()
3651 dev_err(dev->dev, "inconsistent synclko settings\n"); in ksz_switch_register()
3656 ret = dsa_register_switch(dev->ds); in ksz_switch_register()
3658 dev->dev_ops->exit(dev); in ksz_switch_register()
3663 dev->mib_read_interval = msecs_to_jiffies(5000); in ksz_switch_register()
3666 schedule_delayed_work(&dev->mib_read, 0); in ksz_switch_register()
3672 void ksz_switch_remove(struct ksz_device *dev) in ksz_switch_remove() argument
3675 if (dev->mib_read_interval) { in ksz_switch_remove()
3676 dev->mib_read_interval = 0; in ksz_switch_remove()
3677 cancel_delayed_work_sync(&dev->mib_read); in ksz_switch_remove()
3680 dev->dev_ops->exit(dev); in ksz_switch_remove()
3681 dsa_unregister_switch(dev->ds); in ksz_switch_remove()
3683 if (dev->reset_gpio) in ksz_switch_remove()
3684 gpiod_set_value_cansleep(dev->reset_gpio, 1); in ksz_switch_remove()
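
ksz_switch_register() and ksz_switch_remove() are the entry points used by the bus glue drivers (SPI/I2C), paired with ksz_switch_alloc() seen at lines 3486-3495. A rough usage sketch from a hypothetical SPI probe, with regmap/IRQ setup and error paths trimmed (everything except the ksz_* calls is illustrative):

static int example_spi_probe(struct spi_device *spi)
{
	struct ksz_device *dev;
	int ret;

	/* allocate the common switch state; spi is stashed as bus-private data */
	dev = ksz_switch_alloc(&spi->dev, spi);
	if (!dev)
		return -ENOMEM;

	/* ... create regmaps for 8/16/32-bit access and set dev->irq here ... */

	ret = ksz_switch_register(dev);	/* detect chip, parse DT, register DSA */
	if (ret)
		return ret;

	spi_set_drvdata(spi, dev);

	return 0;
}

static void example_spi_remove(struct spi_device *spi)
{
	struct ksz_device *dev = spi_get_drvdata(spi);

	if (dev)
		ksz_switch_remove(dev);
}
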