Lines Matching +full:speed +full:- +full:bin

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
18 #include <linux/pcs/pcs-xpcs.h>
75 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; in sja1105_is_vlan_configured()
76 count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count; in sja1105_is_vlan_configured()
83 return -1; in sja1105_is_vlan_configured()
88 struct sja1105_private *priv = ds->priv; in sja1105_drop_untagged()
91 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_drop_untagged()
106 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_pvid_apply()
121 struct sja1105_private *priv = ds->priv; in sja1105_commit_pvid()
128 pvid = priv->bridge_pvid[port]; in sja1105_commit_pvid()
130 pvid = priv->tag_8021q_pvid[port]; in sja1105_commit_pvid()
137 * VLAN-aware bridge. When the tag_8021q pvid is used, we are in sja1105_commit_pvid()
143 if (pvid == priv->bridge_pvid[port]) { in sja1105_commit_pvid()
144 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; in sja1105_commit_pvid()
162 * Every queue i holds top[i] - base[i] frames. in sja1105_init_mac_settings()
163 * Sum of top[i] - base[i] is 511 (max hardware limit). in sja1105_init_mac_settings()
170 /* Always put the MAC speed in automatic mode, where it can be in sja1105_init_mac_settings()
173 .speed = priv->info->port_speed[SJA1105_SPEED_AUTO], in sja1105_init_mac_settings()
174 /* No static correction for 1-step 1588 events */ in sja1105_init_mac_settings()
186 /* Don't drop double-tagged traffic */ in sja1105_init_mac_settings()
192 /* Disable learning and I/O on user ports by default - in sja1105_init_mac_settings()
200 struct dsa_switch *ds = priv->ds; in sja1105_init_mac_settings()
204 table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG]; in sja1105_init_mac_settings()
207 if (table->entry_count) { in sja1105_init_mac_settings()
208 kfree(table->entries); in sja1105_init_mac_settings()
209 table->entry_count = 0; in sja1105_init_mac_settings()
212 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_mac_settings()
213 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_mac_settings()
214 if (!table->entries) in sja1105_init_mac_settings()
215 return -ENOMEM; in sja1105_init_mac_settings()
217 table->entry_count = table->ops->max_entry_count; in sja1105_init_mac_settings()
219 mac = table->entries; in sja1105_init_mac_settings()
221 list_for_each_entry(dp, &ds->dst->ports, list) { in sja1105_init_mac_settings()
222 if (dp->ds != ds) in sja1105_init_mac_settings()
225 mac[dp->index] = default_mac; in sja1105_init_mac_settings()
228 * enabled for the DSA ports. CPU ports use software-assisted in sja1105_init_mac_settings()
231 * CPU ports in a cross-chip topology if multiple CPU ports in sja1105_init_mac_settings()
235 dp->learning = true; in sja1105_init_mac_settings()
241 mac[dp->index].drpuntag = true; in sja1105_init_mac_settings()
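The egress-queue comment near the top of sja1105_init_mac_settings() (each queue i holds top[i] - base[i] frames, and the per-port total may not exceed 511) is easiest to see with numbers. Below is a minimal sketch of one even partition that respects that limit; the values are illustrative, not necessarily the driver's defaults:

#define EXAMPLE_NUM_TC		8	/* same count as SJA1105_NUM_TC */
#define EXAMPLE_FRAME_LIMIT	511	/* per-port limit quoted in the comment */

/* Queue i spans [base[i], top[i]] and holds top[i] - base[i] frames:
 * 63 frames per queue, and 8 * 63 = 504 <= 511.
 */
static void example_even_queue_split(u64 base[EXAMPLE_NUM_TC], u64 top[EXAMPLE_NUM_TC])
{
	int per_queue = EXAMPLE_FRAME_LIMIT / EXAMPLE_NUM_TC;	/* 63 */
	int i;

	for (i = 0; i < EXAMPLE_NUM_TC; i++) {
		base[i] = i * (per_queue + 1);	/* 0, 64, 128, ..., 448 */
		top[i] = base[i] + per_queue;	/* 63, 127, ..., 511 */
	}
}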
249 struct device *dev = &priv->spidev->dev; in sja1105_init_mii_settings()
251 struct dsa_switch *ds = priv->ds; in sja1105_init_mii_settings()
255 table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS]; in sja1105_init_mii_settings()
258 if (table->entry_count) { in sja1105_init_mii_settings()
259 kfree(table->entries); in sja1105_init_mii_settings()
260 table->entry_count = 0; in sja1105_init_mii_settings()
263 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_mii_settings()
264 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_mii_settings()
265 if (!table->entries) in sja1105_init_mii_settings()
266 return -ENOMEM; in sja1105_init_mii_settings()
269 table->entry_count = table->ops->max_entry_count; in sja1105_init_mii_settings()
271 mii = table->entries; in sja1105_init_mii_settings()
273 for (i = 0; i < ds->num_ports; i++) { in sja1105_init_mii_settings()
276 if (dsa_is_unused_port(priv->ds, i)) in sja1105_init_mii_settings()
279 switch (priv->phy_mode[i]) { in sja1105_init_mii_settings()
281 if (priv->info->internal_phy[i] == SJA1105_NO_PHY) in sja1105_init_mii_settings()
284 mii->xmii_mode[i] = XMII_MODE_MII; in sja1105_init_mii_settings()
285 if (priv->info->internal_phy[i] == SJA1105_PHY_BASE_TX) in sja1105_init_mii_settings()
286 mii->special[i] = true; in sja1105_init_mii_settings()
293 if (!priv->info->supports_mii[i]) in sja1105_init_mii_settings()
296 mii->xmii_mode[i] = XMII_MODE_MII; in sja1105_init_mii_settings()
302 if (!priv->info->supports_rmii[i]) in sja1105_init_mii_settings()
305 mii->xmii_mode[i] = XMII_MODE_RMII; in sja1105_init_mii_settings()
311 if (!priv->info->supports_rgmii[i]) in sja1105_init_mii_settings()
314 mii->xmii_mode[i] = XMII_MODE_RGMII; in sja1105_init_mii_settings()
317 if (!priv->info->supports_sgmii[i]) in sja1105_init_mii_settings()
320 mii->xmii_mode[i] = XMII_MODE_SGMII; in sja1105_init_mii_settings()
321 mii->special[i] = true; in sja1105_init_mii_settings()
324 if (!priv->info->supports_2500basex[i]) in sja1105_init_mii_settings()
327 mii->xmii_mode[i] = XMII_MODE_SGMII; in sja1105_init_mii_settings()
328 mii->special[i] = true; in sja1105_init_mii_settings()
333 phy_modes(priv->phy_mode[i]), i); in sja1105_init_mii_settings()
334 return -EINVAL; in sja1105_init_mii_settings()
337 mii->phy_mac[i] = role; in sja1105_init_mii_settings()
348 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; in sja1105_init_static_fdb()
351 * entries, except for a special entry at the end which is a catch-all in sja1105_init_static_fdb()
354 if (table->entry_count) { in sja1105_init_static_fdb()
355 kfree(table->entries); in sja1105_init_static_fdb()
356 table->entry_count = 0; in sja1105_init_static_fdb()
359 if (!priv->info->can_limit_mcast_flood) in sja1105_init_static_fdb()
362 table->entries = kcalloc(1, table->ops->unpacked_entry_size, in sja1105_init_static_fdb()
364 if (!table->entries) in sja1105_init_static_fdb()
365 return -ENOMEM; in sja1105_init_static_fdb()
367 table->entry_count = 1; in sja1105_init_static_fdb()
368 l2_lookup = table->entries; in sja1105_init_static_fdb()
374 l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1; in sja1105_init_static_fdb()
377 for (port = 0; port < priv->ds->num_ports; port++) in sja1105_init_static_fdb()
378 if (!dsa_is_unused_port(priv->ds, port)) in sja1105_init_static_fdb()
389 /* All entries within a FDB bin are available for learning */ in sja1105_init_l2_lookup_params()
397 /* Don't discard management traffic based on ENFPORT - in sja1105_init_l2_lookup_params()
414 struct dsa_switch *ds = priv->ds; in sja1105_init_l2_lookup_params()
419 for (port = 0; port < ds->num_ports; port++) in sja1105_init_l2_lookup_params()
425 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_lookup_params()
432 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; in sja1105_init_l2_lookup_params()
434 if (table->entry_count) { in sja1105_init_l2_lookup_params()
435 kfree(table->entries); in sja1105_init_l2_lookup_params()
436 table->entry_count = 0; in sja1105_init_l2_lookup_params()
439 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_l2_lookup_params()
440 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_l2_lookup_params()
441 if (!table->entries) in sja1105_init_l2_lookup_params()
442 return -ENOMEM; in sja1105_init_l2_lookup_params()
444 table->entry_count = table->ops->max_entry_count; in sja1105_init_l2_lookup_params()
447 ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] = in sja1105_init_l2_lookup_params()
455 * All DT-defined ports are members of this VLAN, and there are no
472 struct dsa_switch *ds = priv->ds; in sja1105_init_static_vlan()
475 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; in sja1105_init_static_vlan()
477 if (table->entry_count) { in sja1105_init_static_vlan()
478 kfree(table->entries); in sja1105_init_static_vlan()
479 table->entry_count = 0; in sja1105_init_static_vlan()
482 table->entries = kzalloc(table->ops->unpacked_entry_size, in sja1105_init_static_vlan()
484 if (!table->entries) in sja1105_init_static_vlan()
485 return -ENOMEM; in sja1105_init_static_vlan()
487 table->entry_count = 1; in sja1105_init_static_vlan()
489 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_static_vlan()
498 priv->tag_8021q_pvid[port] = SJA1105_DEFAULT_VLAN; in sja1105_init_static_vlan()
499 priv->bridge_pvid[port] = SJA1105_DEFAULT_VLAN; in sja1105_init_static_vlan()
503 ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid; in sja1105_init_static_vlan()
510 struct dsa_switch *ds = priv->ds; in sja1105_init_l2_forwarding()
517 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING]; in sja1105_init_l2_forwarding()
519 if (table->entry_count) { in sja1105_init_l2_forwarding()
520 kfree(table->entries); in sja1105_init_l2_forwarding()
521 table->entry_count = 0; in sja1105_init_l2_forwarding()
524 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_l2_forwarding()
525 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_l2_forwarding()
526 if (!table->entries) in sja1105_init_l2_forwarding()
527 return -ENOMEM; in sja1105_init_l2_forwarding()
529 table->entry_count = table->ops->max_entry_count; in sja1105_init_l2_forwarding()
531 l2fwd = table->entries; in sja1105_init_l2_forwarding()
537 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_forwarding()
546 * only to the always-on domain (CPU port and DSA links) in sja1105_init_l2_forwarding()
548 for (from = 0; from < ds->num_ports; from++) { in sja1105_init_l2_forwarding()
552 for (to = 0; to < ds->num_ports; to++) { in sja1105_init_l2_forwarding()
565 * always-on domain). These can send packets to any enabled port except in sja1105_init_l2_forwarding()
568 for (from = 0; from < ds->num_ports; from++) { in sja1105_init_l2_forwarding()
572 for (to = 0; to < ds->num_ports; to++) { in sja1105_init_l2_forwarding()
594 dst = ds->dst; in sja1105_init_l2_forwarding()
596 list_for_each_entry(dl, &dst->rtable, list) { in sja1105_init_l2_forwarding()
597 if (dl->dp->ds != ds || dl->link_dp->cpu_dp == dl->dp->cpu_dp) in sja1105_init_l2_forwarding()
600 from = dl->dp->index; in sja1105_init_l2_forwarding()
603 dev_warn(ds->dev, in sja1105_init_l2_forwarding()
616 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_forwarding()
620 priv->ucast_egress_floods |= BIT(port); in sja1105_init_l2_forwarding()
621 priv->bcast_egress_floods |= BIT(port); in sja1105_init_l2_forwarding()
625 * Create a one-to-one mapping. in sja1105_init_l2_forwarding()
628 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_forwarding()
632 l2fwd[ds->num_ports + tc].vlan_pmap[port] = tc; in sja1105_init_l2_forwarding()
635 l2fwd[ds->num_ports + tc].type_egrpcp2outputq = true; in sja1105_init_l2_forwarding()
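For orientation, the indexing in the loop above reflects how sja1105_init_l2_forwarding() lays out the L2 Forwarding table: the first ds->num_ports entries are per-ingress-port rows (reachable ports, flooding and broadcast domains), and the 8 rows after them are per-traffic-class rows whose vlan_pmap[] selects the egress queue, mapped one-to-one here:

	l2fwd[0 .. ds->num_ports - 1]           per-port forwarding/flooding domains
	l2fwd[ds->num_ports + tc], tc = 0..7    vlan_pmap[port] = tc  (PCP tc -> egress queue tc)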
644 struct dsa_switch *ds = priv->ds; in sja1110_init_pcp_remapping()
648 table = &priv->static_config.tables[BLK_IDX_PCP_REMAPPING]; in sja1110_init_pcp_remapping()
651 if (!table->ops->max_entry_count) in sja1110_init_pcp_remapping()
654 if (table->entry_count) { in sja1110_init_pcp_remapping()
655 kfree(table->entries); in sja1110_init_pcp_remapping()
656 table->entry_count = 0; in sja1110_init_pcp_remapping()
659 table->entries = kcalloc(table->ops->max_entry_count, in sja1110_init_pcp_remapping()
660 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1110_init_pcp_remapping()
661 if (!table->entries) in sja1110_init_pcp_remapping()
662 return -ENOMEM; in sja1110_init_pcp_remapping()
664 table->entry_count = table->ops->max_entry_count; in sja1110_init_pcp_remapping()
666 pcp_remap = table->entries; in sja1110_init_pcp_remapping()
669 for (port = 0; port < ds->num_ports; port++) { in sja1110_init_pcp_remapping()
685 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; in sja1105_init_l2_forwarding_params()
687 if (table->entry_count) { in sja1105_init_l2_forwarding_params()
688 kfree(table->entries); in sja1105_init_l2_forwarding_params()
689 table->entry_count = 0; in sja1105_init_l2_forwarding_params()
692 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_l2_forwarding_params()
693 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_l2_forwarding_params()
694 if (!table->entries) in sja1105_init_l2_forwarding_params()
695 return -ENOMEM; in sja1105_init_l2_forwarding_params()
697 table->entry_count = table->ops->max_entry_count; in sja1105_init_l2_forwarding_params()
700 l2fwd_params = table->entries; in sja1105_init_l2_forwarding_params()
703 l2fwd_params->max_dynp = 0; in sja1105_init_l2_forwarding_params()
705 l2fwd_params->part_spc[0] = priv->info->max_frame_mem; in sja1105_init_l2_forwarding_params()
716 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; in sja1105_frame_memory_partitioning()
717 l2_fwd_params = table->entries; in sja1105_frame_memory_partitioning()
718 l2_fwd_params->part_spc[0] = SJA1105_MAX_FRAME_MEMORY; in sja1105_frame_memory_partitioning()
720 /* If we have any critical-traffic virtual links, we need to reserve in sja1105_frame_memory_partitioning()
723 * remaining for best-effort traffic. TODO: figure out a more flexible in sja1105_frame_memory_partitioning()
726 if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count) in sja1105_frame_memory_partitioning()
729 table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS]; in sja1105_frame_memory_partitioning()
730 vl_fwd_params = table->entries; in sja1105_frame_memory_partitioning()
732 l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY; in sja1105_frame_memory_partitioning()
733 vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY; in sja1105_frame_memory_partitioning()
(columns: TDMACONFIGIDX | 100 Mbps ports | 1 Gbps ports | 2.5 Gbps ports | disabled ports)
739 * -----+----------------+---------------+---------------+---------------
741 * 1 |0, [5:10], retag| [1:2] | [3:4] | -
742 * 2 | 0, [5:10] | [1:3], retag | 4 | -
743 * 3 | 0, [5:10] |[1:2], 4, retag| 3 | -
744 * 4 | 0, 2, [5:10] | 1, retag | [3:4] | -
745 * 5 | 0, 1, [5:10] | 2, retag | [3:4] | -
746 * 14 | 0, [5:10] | [1:4], retag | - | -
747 * 15 | [5:10] | [0:4], retag | - | -
758 if (priv->info->device_id != SJA1110_DEVICE_ID) in sja1110_select_tdmaconfigidx()
761 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; in sja1110_select_tdmaconfigidx()
762 general_params = table->entries; in sja1110_select_tdmaconfigidx()
767 port_1_is_base_tx = priv->phy_mode[1] == PHY_INTERFACE_MODE_INTERNAL; in sja1110_select_tdmaconfigidx()
768 port_3_is_2500 = priv->phy_mode[3] == PHY_INTERFACE_MODE_2500BASEX; in sja1110_select_tdmaconfigidx()
769 port_4_is_2500 = priv->phy_mode[4] == PHY_INTERFACE_MODE_2500BASEX; in sja1110_select_tdmaconfigidx()
787 general_params->tdmaconfigidx = tdmaconfigidx; in sja1110_select_tdmaconfigidx()
793 struct dsa_switch *ds = priv->ds; in sja1105_init_topology()
800 general_params->host_port = ds->num_ports; in sja1105_init_topology()
802 /* Link-local traffic received on casc_port will be forwarded in sja1105_init_topology()
812 if (!priv->info->multiple_cascade_ports) in sja1105_init_topology()
813 general_params->casc_port = ds->num_ports; in sja1105_init_topology()
815 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_topology()
820 * upstream-facing DSA links in sja1105_init_topology()
823 if (general_params->host_port == ds->num_ports) { in sja1105_init_topology()
824 general_params->host_port = port; in sja1105_init_topology()
826 dev_err(ds->dev, in sja1105_init_topology()
828 general_params->host_port, port); in sja1105_init_topology()
829 return -EINVAL; in sja1105_init_topology()
833 /* Cascade ports are downstream-facing DSA links */ in sja1105_init_topology()
835 if (priv->info->multiple_cascade_ports) { in sja1105_init_topology()
836 general_params->casc_port |= BIT(port); in sja1105_init_topology()
837 } else if (general_params->casc_port == ds->num_ports) { in sja1105_init_topology()
838 general_params->casc_port = port; in sja1105_init_topology()
840 dev_err(ds->dev, in sja1105_init_topology()
842 general_params->casc_port, port); in sja1105_init_topology()
843 return -EINVAL; in sja1105_init_topology()
848 if (general_params->host_port == ds->num_ports) { in sja1105_init_topology()
849 dev_err(ds->dev, "No host port configured\n"); in sja1105_init_topology()
850 return -EINVAL; in sja1105_init_topology()
861 .switchid = priv->ds->index, in sja1105_init_general_params()
862 /* Priority queue for link-local management frames in sja1105_init_general_params()
863 * (both ingress to and egress from CPU - PTP, STP etc) in sja1105_init_general_params()
875 .mirr_port = priv->ds->num_ports, in sja1105_init_general_params()
880 /* Only update correctionField for 1-step PTP (L2 transport) */ in sja1105_init_general_params()
900 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; in sja1105_init_general_params()
902 if (table->entry_count) { in sja1105_init_general_params()
903 kfree(table->entries); in sja1105_init_general_params()
904 table->entry_count = 0; in sja1105_init_general_params()
907 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_general_params()
908 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_general_params()
909 if (!table->entries) in sja1105_init_general_params()
910 return -ENOMEM; in sja1105_init_general_params()
912 table->entry_count = table->ops->max_entry_count; in sja1105_init_general_params()
914 general_params = table->entries; in sja1105_init_general_params()
929 table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS]; in sja1105_init_avb_params()
932 if (table->entry_count) { in sja1105_init_avb_params()
933 kfree(table->entries); in sja1105_init_avb_params()
934 table->entry_count = 0; in sja1105_init_avb_params()
937 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_avb_params()
938 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_avb_params()
939 if (!table->entries) in sja1105_init_avb_params()
940 return -ENOMEM; in sja1105_init_avb_params()
942 table->entry_count = table->ops->max_entry_count; in sja1105_init_avb_params()
944 avb = table->entries; in sja1105_init_avb_params()
947 avb->destmeta = SJA1105_META_DMAC; in sja1105_init_avb_params()
948 avb->srcmeta = SJA1105_META_SMAC; in sja1105_init_avb_params()
956 avb->cas_master = false; in sja1105_init_avb_params()
961 /* The L2 policing table is 2-stage. The table is looked up for each frame
969-995 * [two-column ASCII diagram of the 2-stage policing table; only its +---+ border rows matched this search]
997 * In this driver, we shall use policers 0-4 as statically allocated port
1004 #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000) argument
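A quick sanity check of the RATE unit implied by the macro above, and of how it agrees with the byte-based conversion in sja1105_port_policer_add() further down:

	SJA1105_RATE_MBPS(1000) = 1000 * 64000 / 1000 = 64000
	  => one RATE unit = 1000 Mbps / 64000 = 15.625 kbps (1/64 of a Mbps)

	512 * rate_bytes_per_sec / 1000000
	  = (rate_bytes_per_sec * 8 / 1000000) * 64 = Mbps * 64
	  => the same unit, since 8 * 64 = 512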
1009 struct dsa_switch *ds = priv->ds; in sja1105_init_l2_policing()
1013 table = &priv->static_config.tables[BLK_IDX_L2_POLICING]; in sja1105_init_l2_policing()
1016 if (table->entry_count) { in sja1105_init_l2_policing()
1017 kfree(table->entries); in sja1105_init_l2_policing()
1018 table->entry_count = 0; in sja1105_init_l2_policing()
1021 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_l2_policing()
1022 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_l2_policing()
1023 if (!table->entries) in sja1105_init_l2_policing()
1024 return -ENOMEM; in sja1105_init_l2_policing()
1026 table->entry_count = table->ops->max_entry_count; in sja1105_init_l2_policing()
1028 policing = table->entries; in sja1105_init_l2_policing()
1031 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_policing()
1032 int mcast = (ds->num_ports * (SJA1105_NUM_TC + 1)) + port; in sja1105_init_l2_policing()
1033 int bcast = (ds->num_ports * SJA1105_NUM_TC) + port; in sja1105_init_l2_policing()
1040 if (mcast < table->ops->max_entry_count) in sja1105_init_l2_policing()
1045 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_policing()
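The bcast/mcast arithmetic a few lines up gives the following Stage-1 layout on a 5-port SJA1105 with 8 traffic classes; the SJA1110 simply has more ports and a larger table, which is why only the multicast index needs the bounds check (the SJA1105 policing table holds 45 entries, consistent with that check):

	entries  0..39   per-{ingress port, traffic class} rows
	entries 40..44   per-port broadcast rows   (bcast = 5 * 8 + port)
	entries 45..49   per-port multicast rows   (mcast = 5 * 9 + port), beyond
	                 the 45-entry SJA1105 table, so only written on SJA1110

Each port's rows are all pointed at entry "port" via SHARINDX (the statically allocated policers 0-4 from the comment above), which is why sja1105_port_policer_add() and sja1105_change_mtu() further down can program the per-port policer by indexing policing[port] directly.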
1064 sja1105_static_config_free(&priv->static_config); in sja1105_static_config_load()
1065 rc = sja1105_static_config_init(&priv->static_config, in sja1105_static_config_load()
1066 priv->info->static_ops, in sja1105_static_config_load()
1067 priv->info->device_id); in sja1105_static_config_load()
1111 * based on the explicit "rx-internal-delay-ps" and "tx-internal-delay-ps"
1116 * Previously we were acting upon the "phy-mode" property when we were
1117 * operating in fixed-link, basically acting as a PHY, but with a reversed
1123 * If the "{rx,tx}-internal-delay-ps" properties are not specified, we fall
1124 * back to the legacy behavior and apply delays on fixed-link ports based on
1125 * the reverse interpretation of the phy-mode. This is a deviation from the
1128 * "{rx,tx}-internal-delay-ps" with a value of 0.
1133 phy_interface_t phy_mode = priv->phy_mode[port]; in sja1105_parse_rgmii_delays()
1134 struct device *dev = &priv->spidev->dev; in sja1105_parse_rgmii_delays()
1135 int rx_delay = -1, tx_delay = -1; in sja1105_parse_rgmii_delays()
1140 of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay); in sja1105_parse_rgmii_delays()
1141 of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay); in sja1105_parse_rgmii_delays()
1143 if (rx_delay == -1 && tx_delay == -1 && priv->fixed_link[port]) { in sja1105_parse_rgmii_delays()
1145 "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, " in sja1105_parse_rgmii_delays()
1146 "please update device tree to specify \"rx-internal-delay-ps\" and " in sja1105_parse_rgmii_delays()
1147 "\"tx-internal-delay-ps\"", in sja1105_parse_rgmii_delays()
1164 if ((rx_delay || tx_delay) && !priv->info->setup_rgmii_delay) { in sja1105_parse_rgmii_delays()
1166 return -EINVAL; in sja1105_parse_rgmii_delays()
1176 return -ERANGE; in sja1105_parse_rgmii_delays()
1179 priv->rgmii_rx_delay_ps[port] = rx_delay; in sja1105_parse_rgmii_delays()
1180 priv->rgmii_tx_delay_ps[port] = tx_delay; in sja1105_parse_rgmii_delays()
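The legacy fallback described in the comment block above sja1105_parse_rgmii_delays() boils down to deriving the delays from the reversed phy-mode when both properties are missing on a fixed-link port. A minimal sketch, assuming a nominal 2 ns (2000 ps) internal delay; the constant is an illustration, not a value taken from this listing:

	/* rx_delay/tx_delay are still -1 here: neither "rx-internal-delay-ps"
	 * nor "tx-internal-delay-ps" was found, so interpret the phy-mode the
	 * way a PHY would (illustrative fallback values).
	 */
	if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
	    phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
		rx_delay = 2000;	/* ps */

	if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
	    phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
		tx_delay = 2000;	/* ps */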
1188 struct device *dev = &priv->spidev->dev; in sja1105_parse_ports_node()
1202 return -ENODEV; in sja1105_parse_ports_node()
1208 dev_err(dev, "Failed to read phy-mode or " in sja1105_parse_ports_node()
1209 "phy-interface-type property for port %d\n", in sja1105_parse_ports_node()
1212 return -ENODEV; in sja1105_parse_ports_node()
1215 phy_node = of_parse_phandle(child, "phy-handle", 0); in sja1105_parse_ports_node()
1218 dev_err(dev, "phy-handle or fixed-link " in sja1105_parse_ports_node()
1221 return -ENODEV; in sja1105_parse_ports_node()
1223 /* phy-handle is missing, but fixed-link isn't. in sja1105_parse_ports_node()
1226 priv->fixed_link[index] = true; in sja1105_parse_ports_node()
1231 priv->phy_mode[index] = phy_mode; in sja1105_parse_ports_node()
1245 struct device *dev = &priv->spidev->dev; in sja1105_parse_dt()
1246 struct device_node *switch_node = dev->of_node; in sja1105_parse_dt()
1252 ports_node = of_get_child_by_name(switch_node, "ethernet-ports"); in sja1105_parse_dt()
1255 return -ENODEV; in sja1105_parse_dt()
1264 /* Convert link speed from SJA1105 to ethtool encoding */
1266 u64 speed) in sja1105_port_speed_to_ethtool() argument
1268 if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS]) in sja1105_port_speed_to_ethtool()
1270 if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS]) in sja1105_port_speed_to_ethtool()
1272 if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) in sja1105_port_speed_to_ethtool()
1274 if (speed == priv->info->port_speed[SJA1105_SPEED_2500MBPS]) in sja1105_port_speed_to_ethtool()
1279 /* Set link speed in the MAC configuration for a specific port. */
1284 struct device *dev = priv->ds->dev; in sja1105_adjust_port_config()
1285 u64 speed; in sja1105_adjust_port_config() local
1294 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_adjust_port_config()
1299 * the state->interface, but AN has not completed and the in sja1105_adjust_port_config()
1300 * speed is not yet valid. UM10944.pdf says that setting in sja1105_adjust_port_config()
1302 * ok for power consumption in case AN will never complete - in sja1105_adjust_port_config()
1305 speed = priv->info->port_speed[SJA1105_SPEED_AUTO]; in sja1105_adjust_port_config()
1308 speed = priv->info->port_speed[SJA1105_SPEED_10MBPS]; in sja1105_adjust_port_config()
1311 speed = priv->info->port_speed[SJA1105_SPEED_100MBPS]; in sja1105_adjust_port_config()
1314 speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS]; in sja1105_adjust_port_config()
1317 speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS]; in sja1105_adjust_port_config()
1320 dev_err(dev, "Invalid speed %iMbps\n", speed_mbps); in sja1105_adjust_port_config()
1321 return -EINVAL; in sja1105_adjust_port_config()
1331 if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII) in sja1105_adjust_port_config()
1332 mac[port].speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS]; in sja1105_adjust_port_config()
1333 else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX) in sja1105_adjust_port_config()
1334 mac[port].speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS]; in sja1105_adjust_port_config()
1336 mac[port].speed = speed; in sja1105_adjust_port_config()
1352 if (!phy_interface_mode_is_rgmii(priv->phy_mode[port])) in sja1105_adjust_port_config()
1361 struct sja1105_private *priv = ds->priv; in sja1105_mac_select_pcs()
1362 struct dw_xpcs *xpcs = priv->xpcs[port]; in sja1105_mac_select_pcs()
1365 return &xpcs->pcs; in sja1105_mac_select_pcs()
1374 sja1105_inhibit_tx(ds->priv, BIT(port), true); in sja1105_mac_link_down()
1381 int speed, int duplex, in sja1105_mac_link_up() argument
1384 struct sja1105_private *priv = ds->priv; in sja1105_mac_link_up()
1386 sja1105_adjust_port_config(priv, port, speed); in sja1105_mac_link_up()
1394 struct sja1105_private *priv = ds->priv; in sja1105_phylink_get_caps()
1398 phy_mode = priv->phy_mode[port]; in sja1105_phylink_get_caps()
1403 * changes between SGMII and 2500base-X. in sja1105_phylink_get_caps()
1405 if (priv->info->supports_sgmii[port]) in sja1105_phylink_get_caps()
1407 config->supported_interfaces); in sja1105_phylink_get_caps()
1409 if (priv->info->supports_2500basex[port]) in sja1105_phylink_get_caps()
1411 config->supported_interfaces); in sja1105_phylink_get_caps()
1417 __set_bit(phy_mode, config->supported_interfaces); in sja1105_phylink_get_caps()
1421 * support half-duplex traffic modes. in sja1105_phylink_get_caps()
1423 config->mac_capabilities = MAC_10FD | MAC_100FD; in sja1105_phylink_get_caps()
1425 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; in sja1105_phylink_get_caps()
1426 if (mii->xmii_mode[port] == XMII_MODE_RGMII || in sja1105_phylink_get_caps()
1427 mii->xmii_mode[port] == XMII_MODE_SGMII) in sja1105_phylink_get_caps()
1428 config->mac_capabilities |= MAC_1000FD; in sja1105_phylink_get_caps()
1430 if (priv->info->supports_2500basex[port]) in sja1105_phylink_get_caps()
1431 config->mac_capabilities |= MAC_2500FD; in sja1105_phylink_get_caps()
1442 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; in sja1105_find_static_fdb_entry()
1443 l2_lookup = table->entries; in sja1105_find_static_fdb_entry()
1445 for (i = 0; i < table->entry_count; i++) in sja1105_find_static_fdb_entry()
1446 if (l2_lookup[i].macaddr == requested->macaddr && in sja1105_find_static_fdb_entry()
1447 l2_lookup[i].vlanid == requested->vlanid && in sja1105_find_static_fdb_entry()
1451 return -1; in sja1105_find_static_fdb_entry()
1468 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; in sja1105_static_fdb_change()
1477 rc = sja1105_table_resize(table, table->entry_count + 1); in sja1105_static_fdb_change()
1481 match = table->entry_count - 1; in sja1105_static_fdb_change()
1485 l2_lookup = table->entries; in sja1105_static_fdb_change()
1501 l2_lookup[match] = l2_lookup[table->entry_count - 1]; in sja1105_static_fdb_change()
1502 return sja1105_table_resize(table, table->entry_count - 1); in sja1105_static_fdb_change()
1505 /* First-generation switches have a 4-way set associative TCAM that
1507 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
1508 * For the placement of a newly learnt FDB entry, the switch selects the bin
1509 * based on a hash function, and the way within that bin incrementally.
1511 static int sja1105et_fdb_index(int bin, int way) in sja1105et_fdb_index() argument
1513 return bin * SJA1105ET_FDB_BIN_SIZE + way; in sja1105et_fdb_index()
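With the 4-entry bins described in the comment above (the "grouping of 4 entries" is SJA1105ET_FDB_BIN_SIZE), the mapping is simply:

	index = bin * 4 + way     e.g. bin 100 occupies TCAM indices 400..403 (ways 0..3)

so an address whose hash lands in a full bin has only those four slots available, which is what forces the eviction warned about in sja1105et_fdb_add() below.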
1516 static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin, in sja1105et_is_fdb_entry_in_bin() argument
1525 int index = sja1105et_fdb_index(bin, way); in sja1105et_is_fdb_entry_in_bin()
1545 return -1; in sja1105et_is_fdb_entry_in_bin()
1552 struct sja1105_private *priv = ds->priv; in sja1105et_fdb_add()
1553 struct device *dev = ds->dev; in sja1105et_fdb_add()
1554 int last_unused = -1; in sja1105et_fdb_add()
1556 int bin, way, rc; in sja1105et_fdb_add() local
1558 bin = sja1105et_fdb_hash(priv, addr, vid); in sja1105et_fdb_add()
1560 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid, in sja1105et_fdb_add()
1571 int index = sja1105et_fdb_index(bin, way); in sja1105et_fdb_add()
1583 /* Bin is full, need to evict somebody. in sja1105et_fdb_add()
1587 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly in sja1105et_fdb_add()
1591 dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n", in sja1105et_fdb_add()
1592 bin, addr, way); in sja1105et_fdb_add()
1599 l2_lookup.index = sja1105et_fdb_index(bin, way); in sja1105et_fdb_add()
1608 start = sja1105et_fdb_index(bin, 0); in sja1105et_fdb_add()
1609 end = sja1105et_fdb_index(bin, way); in sja1105et_fdb_add()
1614 if (rc == -ENOENT) in sja1105et_fdb_add()
1637 struct sja1105_private *priv = ds->priv; in sja1105et_fdb_del()
1638 int index, bin, way, rc; in sja1105et_fdb_del() local
1641 bin = sja1105et_fdb_hash(priv, addr, vid); in sja1105et_fdb_del()
1642 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid, in sja1105et_fdb_del()
1646 index = sja1105et_fdb_index(bin, way); in sja1105et_fdb_del()
1672 struct sja1105_private *priv = ds->priv; in sja1105pqrs_fdb_add()
1678 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); in sja1105pqrs_fdb_add()
1686 if (rc == 0 && tmp.index != SJA1105_MAX_L2_LOOKUP_COUNT - 1) { in sja1105pqrs_fdb_add()
1703 * This is slightly inefficient because the strategy is knock-knock at in sja1105pqrs_fdb_add()
1713 dev_err(ds->dev, "FDB is full, cannot add entry.\n"); in sja1105pqrs_fdb_add()
1714 return -EINVAL; in sja1105pqrs_fdb_add()
1743 dev_err(ds->dev, in sja1105pqrs_fdb_add()
1763 struct sja1105_private *priv = ds->priv; in sja1105pqrs_fdb_del()
1769 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); in sja1105pqrs_fdb_del()
1800 struct sja1105_private *priv = ds->priv; in sja1105_fdb_add()
1812 return -EOPNOTSUPP; in sja1105_fdb_add()
1816 mutex_lock(&priv->fdb_lock); in sja1105_fdb_add()
1817 rc = priv->info->fdb_add_cmd(ds, port, addr, vid); in sja1105_fdb_add()
1818 mutex_unlock(&priv->fdb_lock); in sja1105_fdb_add()
1827 struct sja1105_private *priv = ds->priv; in __sja1105_fdb_del()
1838 return -EOPNOTSUPP; in __sja1105_fdb_del()
1842 return priv->info->fdb_del_cmd(ds, port, addr, vid); in __sja1105_fdb_del()
1849 struct sja1105_private *priv = ds->priv; in sja1105_fdb_del()
1852 mutex_lock(&priv->fdb_lock); in sja1105_fdb_del()
1854 mutex_unlock(&priv->fdb_lock); in sja1105_fdb_del()
1862 struct sja1105_private *priv = ds->priv; in sja1105_fdb_dump()
1863 struct device *dev = ds->dev; in sja1105_fdb_dump()
1874 if (rc == -ENOENT) in sja1105_fdb_dump()
1884 * 1024-sized FDB table needs to be traversed 4 times through in sja1105_fdb_dump()
1911 struct sja1105_private *priv = ds->priv; in sja1105_fast_age()
1921 mutex_lock(&priv->fdb_lock); in sja1105_fast_age()
1931 if (rc == -ENOENT) in sja1105_fast_age()
1934 dev_err(ds->dev, "Failed to read FDB: %pe\n", in sja1105_fast_age()
1950 dev_err(ds->dev, in sja1105_fast_age()
1957 mutex_unlock(&priv->fdb_lock); in sja1105_fast_age()
1964 return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid, db); in sja1105_mdb_add()
1971 return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid, db); in sja1105_mdb_del()
1983 struct dsa_switch *ds = priv->ds; in sja1105_manage_flood_domains()
1986 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries; in sja1105_manage_flood_domains()
1988 for (from = 0; from < ds->num_ports; from++) { in sja1105_manage_flood_domains()
1991 for (to = 0; to < priv->ds->num_ports; to++) { in sja1105_manage_flood_domains()
1995 if (priv->ucast_egress_floods & BIT(to)) in sja1105_manage_flood_domains()
1997 if (priv->bcast_egress_floods & BIT(to)) in sja1105_manage_flood_domains()
2022 struct sja1105_private *priv = ds->priv; in sja1105_bridge_member()
2025 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries; in sja1105_bridge_member()
2027 for (i = 0; i < ds->num_ports; i++) { in sja1105_bridge_member()
2071 struct sja1105_private *priv = ds->priv; in sja1105_bridge_stp_state_set()
2074 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_bridge_stp_state_set()
2092 mac[port].dyn_learn = dp->learning; in sja1105_bridge_stp_state_set()
2097 mac[port].dyn_learn = dp->learning; in sja1105_bridge_stp_state_set()
2100 dev_err(ds->dev, "invalid STP state: %d\n", state); in sja1105_bridge_stp_state_set()
2139 #define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
2146 if (priv->info->fixed_cbs_mapping) { in sja1105_find_cbs_shaper()
2148 if (i >= 0 && i < priv->info->num_cbs_shapers) in sja1105_find_cbs_shaper()
2151 return -1; in sja1105_find_cbs_shaper()
2154 for (i = 0; i < priv->info->num_cbs_shapers; i++) in sja1105_find_cbs_shaper()
2155 if (priv->cbs[i].port == port && priv->cbs[i].prio == prio) in sja1105_find_cbs_shaper()
2158 return -1; in sja1105_find_cbs_shaper()
2165 if (priv->info->fixed_cbs_mapping) in sja1105_find_unused_cbs_shaper()
2166 return -1; in sja1105_find_unused_cbs_shaper()
2168 for (i = 0; i < priv->info->num_cbs_shapers; i++) in sja1105_find_unused_cbs_shaper()
2169 if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope) in sja1105_find_unused_cbs_shaper()
2172 return -1; in sja1105_find_unused_cbs_shaper()
2180 for (i = 0; i < priv->info->num_cbs_shapers; i++) { in sja1105_delete_cbs_shaper()
2181 struct sja1105_cbs_entry *cbs = &priv->cbs[i]; in sja1105_delete_cbs_shaper()
2183 if (cbs->port == port && cbs->prio == prio) { in sja1105_delete_cbs_shaper()
2196 struct sja1105_private *priv = ds->priv; in sja1105_setup_tc_cbs()
2201 if (!offload->enable) in sja1105_setup_tc_cbs()
2202 return sja1105_delete_cbs_shaper(priv, port, offload->queue); in sja1105_setup_tc_cbs()
2205 index = sja1105_find_cbs_shaper(priv, port, offload->queue); in sja1105_setup_tc_cbs()
2207 /* That isn't the case - see if we can allocate a new one */ in sja1105_setup_tc_cbs()
2210 return -ENOSPC; in sja1105_setup_tc_cbs()
2213 cbs = &priv->cbs[index]; in sja1105_setup_tc_cbs()
2214 cbs->port = port; in sja1105_setup_tc_cbs()
2215 cbs->prio = offload->queue; in sja1105_setup_tc_cbs()
2219 cbs->credit_hi = offload->hicredit; in sja1105_setup_tc_cbs()
2220 cbs->credit_lo = abs(offload->locredit); in sja1105_setup_tc_cbs()
2222 * link speed. Since the given offload->sendslope is good only for the in sja1105_setup_tc_cbs()
2223 * current link speed anyway, and user space is likely to reprogram it in sja1105_setup_tc_cbs()
2224 * when that changes, don't even bother to track the port's link speed, in sja1105_setup_tc_cbs()
2225 * but deduce the port transmit rate from idleslope - sendslope. in sja1105_setup_tc_cbs()
2227 port_transmit_rate_kbps = offload->idleslope - offload->sendslope; in sja1105_setup_tc_cbs()
2228 cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT, in sja1105_setup_tc_cbs()
2230 cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT), in sja1105_setup_tc_cbs()
2232 /* Convert the negative values from 64-bit 2's complement in sja1105_setup_tc_cbs()
2233 * to 32-bit 2's complement (for the case of 0x80000000 whose in sja1105_setup_tc_cbs()
2236 cbs->credit_lo &= GENMASK_ULL(31, 0); in sja1105_setup_tc_cbs()
2237 cbs->send_slope &= GENMASK_ULL(31, 0); in sja1105_setup_tc_cbs()
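A worked example of the slope conversion above, assuming a 100 Mbps port on which one class reserves 20 Mbps (offload->idleslope = 20000 kbps, offload->sendslope = -80000 kbps, BYTES_PER_KBIT = 125):

	port_transmit_rate_kbps = 20000 - (-80000)  = 100000   (the port's line rate)
	idle_slope = 20000 * 125 / 100000           = 25       (credit gained, in hardware units)
	send_slope = |-80000| * 125 / 100000        = 100      (credit spent, in hardware units)

The hardware thus receives slopes scaled to its own credit units rather than to kbit/s, and the 32-bit masking just below keeps the effectively negative send slope representable.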
2250 if (!priv->cbs) in sja1105_reload_cbs()
2253 for (i = 0; i < priv->info->num_cbs_shapers; i++) { in sja1105_reload_cbs()
2254 struct sja1105_cbs_entry *cbs = &priv->cbs[i]; in sja1105_reload_cbs()
2256 if (!cbs->idle_slope && !cbs->send_slope) in sja1105_reload_cbs()
2271 [SJA1105_SCHEDULING] = "Time-aware scheduling",
2272 [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
2290 struct dsa_switch *ds = priv->ds; in sja1105_static_config_reload()
2296 mutex_lock(&priv->fdb_lock); in sja1105_static_config_reload()
2297 mutex_lock(&priv->mgmt_lock); in sja1105_static_config_reload()
2299 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_static_config_reload()
2301 /* Back up the dynamic link speed changed by sja1105_adjust_port_config in sja1105_static_config_reload()
2302 * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the in sja1105_static_config_reload()
2306 for (i = 0; i < ds->num_ports; i++) { in sja1105_static_config_reload()
2308 mac[i].speed); in sja1105_static_config_reload()
2309 mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO]; in sja1105_static_config_reload()
2311 if (priv->xpcs[i]) in sja1105_static_config_reload()
2312 bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i, in sja1105_static_config_reload()
2317 mutex_lock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2321 mutex_unlock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2328 mutex_unlock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2334 mutex_unlock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2342 /* Mid point, corresponds to pre-reset PTPCLKVAL */ in sja1105_static_config_reload()
2343 t12 = t1 + (t2 - t1) / 2; in sja1105_static_config_reload()
2344 /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */ in sja1105_static_config_reload()
2345 t34 = t3 + (t4 - t3) / 2; in sja1105_static_config_reload()
2347 now += (t34 - t12); in sja1105_static_config_reload()
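The midpoint arithmetic above is how PTP time survives the reset performed by this function: t1/t2 bracket the PTPCLKVAL read taken before the reset and t3/t4 bracket the write that zeroes PTPCLKVAL afterwards, so each midpoint estimates when that register access actually happened. In effect:

	now' = now + ((t3 + t4)/2 - (t1 + t2)/2)

i.e. the saved PTP timestamp is advanced by the wall-clock time spent re-uploading the static config, then programmed back into the freshly reset clock.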
2351 mutex_unlock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2353 dev_info(priv->ds->dev, in sja1105_static_config_reload()
2361 if (priv->info->clocking_setup) { in sja1105_static_config_reload()
2362 rc = priv->info->clocking_setup(priv); in sja1105_static_config_reload()
2367 for (i = 0; i < ds->num_ports; i++) { in sja1105_static_config_reload()
2368 struct dw_xpcs *xpcs = priv->xpcs[i]; in sja1105_static_config_reload()
2383 rc = xpcs_do_config(xpcs, priv->phy_mode[i], NULL, neg_mode); in sja1105_static_config_reload()
2388 int speed = SPEED_UNKNOWN; in sja1105_static_config_reload() local
2390 if (priv->phy_mode[i] == PHY_INTERFACE_MODE_2500BASEX) in sja1105_static_config_reload()
2391 speed = SPEED_2500; in sja1105_static_config_reload()
2393 speed = SPEED_1000; in sja1105_static_config_reload()
2395 speed = SPEED_100; in sja1105_static_config_reload()
2397 speed = SPEED_10; in sja1105_static_config_reload()
2399 xpcs_link_up(&xpcs->pcs, neg_mode, priv->phy_mode[i], in sja1105_static_config_reload()
2400 speed, DUPLEX_FULL); in sja1105_static_config_reload()
2408 mutex_unlock(&priv->mgmt_lock); in sja1105_static_config_reload()
2409 mutex_unlock(&priv->fdb_lock); in sja1105_static_config_reload()
2418 struct sja1105_private *priv = ds->priv; in sja1105_get_tag_protocol()
2420 return priv->info->tag_proto; in sja1105_get_tag_protocol()
2431 struct sja1105_private *priv = ds->priv; in sja1105_vlan_filtering()
2437 list_for_each_entry(rule, &priv->flow_block.rules, list) { in sja1105_vlan_filtering()
2438 if (rule->type == SJA1105_RULE_VL) { in sja1105_vlan_filtering()
2441 return -EBUSY; in sja1105_vlan_filtering()
2455 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; in sja1105_vlan_filtering()
2456 general_params = table->entries; in sja1105_vlan_filtering()
2457 /* EtherType used to identify inner tagged (C-tag) VLAN traffic */ in sja1105_vlan_filtering()
2458 general_params->tpid = tpid; in sja1105_vlan_filtering()
2459 /* EtherType used to identify outer tagged (S-tag) VLAN traffic */ in sja1105_vlan_filtering()
2460 general_params->tpid2 = tpid2; in sja1105_vlan_filtering()
2462 for (port = 0; port < ds->num_ports; port++) { in sja1105_vlan_filtering()
2485 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; in sja1105_vlan_add()
2489 rc = sja1105_table_resize(table, table->entry_count + 1); in sja1105_vlan_add()
2492 match = table->entry_count - 1; in sja1105_vlan_add()
2496 vlan = table->entries; in sja1105_vlan_add()
2523 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; in sja1105_vlan_del()
2531 vlan = table->entries; in sja1105_vlan_del()
2562 struct sja1105_private *priv = ds->priv; in sja1105_bridge_vlan_add()
2563 u16 flags = vlan->flags; in sja1105_bridge_vlan_add()
2568 if (vid_is_dsa_8021q(vlan->vid)) { in sja1105_bridge_vlan_add()
2570 "Range 3072-4095 reserved for dsa_8021q operation"); in sja1105_bridge_vlan_add()
2571 return -EBUSY; in sja1105_bridge_vlan_add()
2574 /* Always install bridge VLANs as egress-tagged on CPU and DSA ports */ in sja1105_bridge_vlan_add()
2578 rc = sja1105_vlan_add(priv, port, vlan->vid, flags, true); in sja1105_bridge_vlan_add()
2582 if (vlan->flags & BRIDGE_VLAN_INFO_PVID) in sja1105_bridge_vlan_add()
2583 priv->bridge_pvid[port] = vlan->vid; in sja1105_bridge_vlan_add()
2591 struct sja1105_private *priv = ds->priv; in sja1105_bridge_vlan_del()
2594 rc = sja1105_vlan_del(priv, port, vlan->vid); in sja1105_bridge_vlan_del()
2607 struct sja1105_private *priv = ds->priv; in sja1105_dsa_8021q_vlan_add()
2622 priv->tag_8021q_pvid[port] = vid; in sja1105_dsa_8021q_vlan_add()
2629 struct sja1105_private *priv = ds->priv; in sja1105_dsa_8021q_vlan_del()
2637 struct netlink_ext_ack *extack = info->info.extack; in sja1105_prechangeupper()
2638 struct net_device *upper = info->upper_dev; in sja1105_prechangeupper()
2639 struct dsa_switch_tree *dst = ds->dst; in sja1105_prechangeupper()
2644 return -EBUSY; in sja1105_prechangeupper()
2648 list_for_each_entry(dp, &dst->ports, list) { in sja1105_prechangeupper()
2653 "Only one VLAN-aware bridge is supported"); in sja1105_prechangeupper()
2654 return -EBUSY; in sja1105_prechangeupper()
2666 struct sja1105_private *priv = ds->priv; in sja1105_mgmt_xmit()
2673 mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest); in sja1105_mgmt_xmit()
2687 dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave); in sja1105_mgmt_xmit()
2694 dev_err_ratelimited(priv->ds->dev, in sja1105_mgmt_xmit()
2704 } while (mgmt_route.enfport && --timeout); in sja1105_mgmt_xmit()
2707 /* Clean up the management route so that a follow-up in sja1105_mgmt_xmit()
2709 * This is only supported in hardware on P/Q/R/S - on E/T it is in sja1105_mgmt_xmit()
2710 * a no-op and we are silently discarding the -EOPNOTSUPP. in sja1105_mgmt_xmit()
2714 dev_err_ratelimited(priv->ds->dev, "xmit timed out\n"); in sja1105_mgmt_xmit()
2730 struct sk_buff *clone, *skb = xmit_work->skb; in sja1105_port_deferred_xmit()
2731 struct dsa_switch *ds = xmit_work->dp->ds; in sja1105_port_deferred_xmit()
2732 struct sja1105_private *priv = ds->priv; in sja1105_port_deferred_xmit()
2733 int port = xmit_work->dp->index; in sja1105_port_deferred_xmit()
2735 clone = SJA1105_SKB_CB(skb)->clone; in sja1105_port_deferred_xmit()
2737 mutex_lock(&priv->mgmt_lock); in sja1105_port_deferred_xmit()
2745 mutex_unlock(&priv->mgmt_lock); in sja1105_port_deferred_xmit()
2753 struct sja1105_private *priv = ds->priv; in sja1105_connect_tag_protocol()
2756 if (proto != priv->info->tag_proto) in sja1105_connect_tag_protocol()
2757 return -EPROTONOSUPPORT; in sja1105_connect_tag_protocol()
2760 tagger_data->xmit_work_fn = sja1105_port_deferred_xmit; in sja1105_connect_tag_protocol()
2761 tagger_data->meta_tstamp_handler = sja1110_process_meta_tstamp; in sja1105_connect_tag_protocol()
2773 struct sja1105_private *priv = ds->priv; in sja1105_set_ageing_time()
2777 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; in sja1105_set_ageing_time()
2778 l2_lookup_params = table->entries; in sja1105_set_ageing_time()
2782 if (l2_lookup_params->maxage == maxage) in sja1105_set_ageing_time()
2785 l2_lookup_params->maxage = maxage; in sja1105_set_ageing_time()
2793 struct sja1105_private *priv = ds->priv; in sja1105_change_mtu()
2800 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; in sja1105_change_mtu()
2812 return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN; in sja1105_get_max_mtu()
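With the standard kernel constants VLAN_ETH_HLEN = 18 and ETH_FCS_LEN = 4, the value returned above works out to:

	2043 - 18 - 4 = 2021 bytes of maximum MTU

i.e. the switch's 2043-byte frame-length ceiling minus a VLAN-tagged Ethernet header and the FCS.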
2825 return -EOPNOTSUPP; in sja1105_port_setup_tc()
2840 struct dsa_switch *ds = priv->ds; in sja1105_mirror_apply()
2846 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; in sja1105_mirror_apply()
2847 general_params = table->entries; in sja1105_mirror_apply()
2849 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_mirror_apply()
2851 already_enabled = (general_params->mirr_port != ds->num_ports); in sja1105_mirror_apply()
2852 if (already_enabled && enabled && general_params->mirr_port != to) { in sja1105_mirror_apply()
2853 dev_err(priv->ds->dev, in sja1105_mirror_apply()
2855 general_params->mirr_port); in sja1105_mirror_apply()
2856 return -EBUSY; in sja1105_mirror_apply()
2865 for (port = 0; port < ds->num_ports; port++) { in sja1105_mirror_apply()
2873 new_mirr_port = ds->num_ports; in sja1105_mirror_apply()
2875 if (new_mirr_port != general_params->mirr_port) { in sja1105_mirror_apply()
2876 general_params->mirr_port = new_mirr_port; in sja1105_mirror_apply()
2897 return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port, in sja1105_mirror_add()
2904 sja1105_mirror_apply(ds->priv, port, mirror->to_local_port, in sja1105_mirror_del()
2905 mirror->ingress, false); in sja1105_mirror_del()
2912 struct sja1105_private *priv = ds->priv; in sja1105_port_policer_add()
2914 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; in sja1105_port_policer_add()
2920 policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec, in sja1105_port_policer_add()
2922 policing[port].smax = policer->burst; in sja1105_port_policer_add()
2930 struct sja1105_private *priv = ds->priv; in sja1105_port_policer_del()
2932 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; in sja1105_port_policer_del()
2945 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_port_set_learning()
2958 priv->ucast_egress_floods |= BIT(to); in sja1105_port_ucast_bcast_flood()
2960 priv->ucast_egress_floods &= ~BIT(to); in sja1105_port_ucast_bcast_flood()
2965 priv->bcast_egress_floods |= BIT(to); in sja1105_port_ucast_bcast_flood()
2967 priv->bcast_egress_floods &= ~BIT(to); in sja1105_port_ucast_bcast_flood()
2981 mutex_lock(&priv->fdb_lock); in sja1105_port_mcast_flood()
2983 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; in sja1105_port_mcast_flood()
2984 l2_lookup = table->entries; in sja1105_port_mcast_flood()
2986 for (match = 0; match < table->entry_count; match++) in sja1105_port_mcast_flood()
2991 if (match == table->entry_count) { in sja1105_port_mcast_flood()
2994 rc = -ENOSPC; in sja1105_port_mcast_flood()
3007 mutex_unlock(&priv->fdb_lock); in sja1105_port_mcast_flood()
3016 struct sja1105_private *priv = ds->priv; in sja1105_port_pre_bridge_flags()
3020 return -EINVAL; in sja1105_port_pre_bridge_flags()
3023 !priv->info->can_limit_mcast_flood) { in sja1105_port_pre_bridge_flags()
3030 return -EINVAL; in sja1105_port_pre_bridge_flags()
3041 struct sja1105_private *priv = ds->priv; in sja1105_port_bridge_flags()
3062 if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) { in sja1105_port_bridge_flags()
3072 /* The programming model for the SJA1105 switch is "all-at-once" via static
3079 * Setting correct PHY link speed does not matter now.
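Because the configuration is "all-at-once", the runtime changes seen throughout this listing follow one of two patterns: patch the affected entry in the in-memory static config and push only that entry through the dynamic reconfiguration interface, or, for settings with no dynamic access, fall back to a full sja1105_static_config_reload(). A minimal sketch of the first pattern, assuming the driver's sja1105_dynamic_config_write() helper (not itself part of this listing):

	struct sja1105_mac_config_entry *mac;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
	mac[port].drpuntag = true;	/* e.g. start dropping untagged frames */

	/* Write only this MAC Configuration Table entry to the hardware;
	 * the trailing "true" keeps the entry valid rather than invalidating it.
	 */
	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					    &mac[port], true);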
3086 struct sja1105_private *priv = ds->priv; in sja1105_setup()
3089 if (priv->info->disable_microcontroller) { in sja1105_setup()
3090 rc = priv->info->disable_microcontroller(priv); in sja1105_setup()
3092 dev_err(ds->dev, in sja1105_setup()
3102 dev_err(ds->dev, "Failed to load static config: %d\n", rc); in sja1105_setup()
3107 if (priv->info->clocking_setup) { in sja1105_setup()
3108 rc = priv->info->clocking_setup(priv); in sja1105_setup()
3110 dev_err(ds->dev, in sja1105_setup()
3122 dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc); in sja1105_setup()
3128 dev_err(ds->dev, "Failed to register MDIO bus: %pe\n", in sja1105_setup()
3151 ds->vlan_filtering_is_global = true; in sja1105_setup()
3152 ds->untag_bridge_pvid = true; in sja1105_setup()
3153 ds->fdb_isolation = true; in sja1105_setup()
3155 ds->max_num_bridges = 7; in sja1105_setup()
3158 ds->num_tx_queues = SJA1105_NUM_TC; in sja1105_setup()
3160 ds->mtu_enforcement_ingress = true; in sja1105_setup()
3161 ds->assisted_learning_on_cpu_port = true; in sja1105_setup()
3175 sja1105_static_config_free(&priv->static_config); in sja1105_setup()
3182 struct sja1105_private *priv = ds->priv; in sja1105_teardown()
3193 sja1105_static_config_free(&priv->static_config); in sja1105_teardown()
3248 const struct sja1105_regs *regs = priv->info->regs; in sja1105_check_device_id()
3250 struct device *dev = &priv->spidev->dev; in sja1105_check_device_id()
3256 rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id, in sja1105_check_device_id()
3261 rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id, in sja1105_check_device_id()
3268 for (match = sja1105_dt_ids; match->compatible[0]; match++) { in sja1105_check_device_id()
3269 const struct sja1105_info *info = match->data; in sja1105_check_device_id()
3272 if (info->device_id != device_id || info->part_no != part_no) in sja1105_check_device_id()
3276 if (priv->info->device_id != device_id || in sja1105_check_device_id()
3277 priv->info->part_no != part_no) { in sja1105_check_device_id()
3279 priv->info->name, info->name); in sja1105_check_device_id()
3281 priv->info = info; in sja1105_check_device_id()
3290 return -ENODEV; in sja1105_check_device_id()
3295 struct device *dev = &spi->dev; in sja1105_probe()
3301 if (!dev->of_node) { in sja1105_probe()
3303 return -EINVAL; in sja1105_probe()
3312 return -ENOMEM; in sja1105_probe()
3317 priv->spidev = spi; in sja1105_probe()
3321 spi->bits_per_word = 8; in sja1105_probe()
3341 /* We need to send at least one 64-bit word of SPI payload per message in sja1105_probe()
3346 return -EINVAL; in sja1105_probe()
3349 priv->max_xfer_len = SJA1105_SIZE_SPI_MSG_MAXLEN; in sja1105_probe()
3350 if (priv->max_xfer_len > max_xfer) in sja1105_probe()
3351 priv->max_xfer_len = max_xfer; in sja1105_probe()
3352 if (priv->max_xfer_len > max_msg - SJA1105_SIZE_SPI_MSG_HEADER) in sja1105_probe()
3353 priv->max_xfer_len = max_msg - SJA1105_SIZE_SPI_MSG_HEADER; in sja1105_probe()
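The clamping above sizes SPI bursts to what the controller can actually handle. As a hedged worked example, assuming the 4-byte message header and 256-byte maximum payload suggested by the constant names (values not shown in this listing): a controller reporting max_transfer_size = 64 and max_message_size = 256 ends up with

	max_xfer_len = min(256, 64, 256 - 4) = 64 bytes per transfer

so longer accesses, such as the static config upload, are split into 64-byte chunks.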
3355 priv->info = of_device_get_match_data(dev); in sja1105_probe()
3364 dev_info(dev, "Probed switch chip: %s\n", priv->info->name); in sja1105_probe()
3368 return -ENOMEM; in sja1105_probe()
3370 ds->dev = dev; in sja1105_probe()
3371 ds->num_ports = priv->info->num_ports; in sja1105_probe()
3372 ds->ops = &sja1105_switch_ops; in sja1105_probe()
3373 ds->priv = priv; in sja1105_probe()
3374 priv->ds = ds; in sja1105_probe()
3376 mutex_init(&priv->ptp_data.lock); in sja1105_probe()
3377 mutex_init(&priv->dynamic_config_lock); in sja1105_probe()
3378 mutex_init(&priv->mgmt_lock); in sja1105_probe()
3379 mutex_init(&priv->fdb_lock); in sja1105_probe()
3380 spin_lock_init(&priv->ts_id_lock); in sja1105_probe()
3384 dev_err(ds->dev, "Failed to parse DT: %d\n", rc); in sja1105_probe()
3389 priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers, in sja1105_probe()
3392 if (!priv->cbs) in sja1105_probe()
3393 return -ENOMEM; in sja1105_probe()
3396 return dsa_register_switch(priv->ds); in sja1105_probe()
3406 dsa_unregister_switch(priv->ds); in sja1105_remove()
3416 dsa_switch_shutdown(priv->ds); in sja1105_shutdown()
3466 MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");