// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/spi/spi.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/of_device.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/dsa/8021q.h>
#include "sja1105.h"
#include "sja1105_sgmii.h"
#include "sja1105_tas.h"

/* FDB match covering any DMAC with the multicast (odd first octet) bit set */
#define SJA1105_UNKNOWN_MULTICAST	0x010000000000ull
#define SJA1105_DEFAULT_VLAN		(VLAN_N_VID - 1)

static const struct dsa_switch_ops sja1105_switch_ops;

/* Pulse the RST line for @pulse_len ms, then wait @startup_delay ms for the
 * switch to come out of reset.
 */
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
			     unsigned int startup_delay)
{
	gpiod_set_value_cansleep(gpio, 1);
	/* Wait for minimum reset pulse length */
	msleep(pulse_len);
	gpiod_set_value_cansleep(gpio, 0);
	/* Wait until chip is ready after reset */
	msleep(startup_delay);
}

/* Set or clear the bit in the L2 Forwarding Table that lets port @from
 * forward traffic towards port @to.
 */
static void
sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
			   int from, int to, bool allow)
{
	if (allow)
		l2_fwd[from].reach_port |= BIT(to);
	else
		l2_fwd[from].reach_port &= ~BIT(to);
}

/* True if the L2 Forwarding Table currently lets @from reach @to */
static bool sja1105_can_forward(struct sja1105_l2_forwarding_entry *l2_fwd,
				int from, int to)
{
	return !!(l2_fwd[from].reach_port & BIT(to));
}

/* Structure used to temporarily transport device tree
 * settings into sja1105_setup
 */
struct sja1105_dt_port {
	phy_interface_t phy_mode;
	sja1105_mii_role_t role;
};

/* Populate the static MAC Configuration Table with per-port defaults.
 * Returns 0 or -ENOMEM.
 */
static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry default_mac = {
		/* Enable all 8 priority queues on egress.
		 * Every queue i holds top[i] - base[i] frames.
		 * Sum of top[i] - base[i] is 511 (max hardware limit).
		 */
		.top  = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
		.base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
		.enabled = {true, true, true, true, true, true, true, true},
		/* Keep standard IFG of 12 bytes on egress. */
		.ifg = 0,
		/* Always put the MAC speed in automatic mode, where it can be
		 * adjusted at runtime by PHYLINK.
		 */
		.speed = SJA1105_SPEED_AUTO,
		/* No static correction for 1-step 1588 events */
		.tp_delin = 0,
		.tp_delout = 0,
		/* Disable aging for critical TTEthernet traffic */
		.maxage = 0xFF,
		/* Internal VLAN (pvid) to apply to untagged ingress */
		.vlanprio = 0,
		.vlanid = 1,
		.ing_mirr = false,
		.egr_mirr = false,
		/* Don't drop traffic with other EtherType than ETH_P_IP */
		.drpnona664 = false,
		/* Don't drop double-tagged traffic */
		.drpdtag = false,
		/* Don't drop untagged traffic */
		.drpuntag = false,
		/* Don't retag 802.1p (VID 0) traffic with the pvid */
		.retag = false,
		/* Disable learning and I/O on user ports by default -
		 * STP will enable it.
		 */
		.dyn_learn = false,
		.egress = false,
		.ingress = false,
	};
	struct sja1105_mac_config_entry *mac;
	struct dsa_switch *ds = priv->ds;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];

	/* Discard previous MAC Configuration Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(table->ops->max_entry_count,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = table->ops->max_entry_count;

	mac = table->entries;

	for (i = 0; i < ds->num_ports; i++) {
		mac[i] = default_mac;
		if (i == dsa_upstream_port(priv->ds, i)) {
			/* STP doesn't get called for CPU port, so we need to
			 * set the I/O parameters statically.
			 */
			mac[i].dyn_learn = true;
			mac[i].ingress = true;
			mac[i].egress = true;
		}
	}

	return 0;
}

/* Only the SJA1105R/S parts have a (single, fixed) SGMII-capable port, and
 * only when that port is actually wired up in the device tree.
 */
static bool sja1105_supports_sgmii(struct sja1105_private *priv, int port)
{
	if (priv->info->part_no != SJA1105R_PART_NO &&
	    priv->info->part_no != SJA1105S_PART_NO)
		return false;

	if (port != SJA1105_SGMII_PORT)
		return false;

	if (dsa_is_unused_port(priv->ds, port))
		return false;

	return true;
}

/* Populate the xMII Mode Parameters Table from the DT-provided PHY modes.
 * Returns 0, -ENOMEM, or -EINVAL for an unsupported phy-mode.
 */
static int sja1105_init_mii_settings(struct sja1105_private *priv,
				     struct sja1105_dt_port *ports)
{
	struct device *dev = &priv->spidev->dev;
	struct sja1105_xmii_params_entry *mii;
	struct dsa_switch *ds = priv->ds;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];

	/* Discard previous xMII Mode Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(table->ops->max_entry_count,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	/* Override table based on PHYLINK DT bindings */
	table->entry_count = table->ops->max_entry_count;

	mii = table->entries;

	for (i = 0; i < ds->num_ports; i++) {
		if (dsa_is_unused_port(priv->ds, i))
			continue;

		switch (ports[i].phy_mode) {
		case PHY_INTERFACE_MODE_MII:
			mii->xmii_mode[i] = XMII_MODE_MII;
			break;
		case PHY_INTERFACE_MODE_RMII:
			mii->xmii_mode[i] = XMII_MODE_RMII;
			break;
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
			mii->xmii_mode[i] = XMII_MODE_RGMII;
			break;
		case PHY_INTERFACE_MODE_SGMII:
			if (!sja1105_supports_sgmii(priv, i))
				return -EINVAL;
			mii->xmii_mode[i] = XMII_MODE_SGMII;
			break;
		default:
			dev_err(dev, "Unsupported PHY mode %s!\n",
				phy_modes(ports[i].phy_mode));
			return -EINVAL;
		}

		/* Even though the SerDes port is able to drive SGMII autoneg
		 * like a PHY would, from the perspective of the XMII tables,
		 * the SGMII port should always be put in MAC mode.
		 */
		if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII)
			mii->phy_mac[i] = XMII_MAC;
		else
			mii->phy_mac[i] = ports[i].role;
	}
	return 0;
}

/* Install the single static catch-all FDB entry used to control the unknown
 * multicast flooding domain (only on parts that can limit multicast flood).
 */
static int sja1105_init_static_fdb(struct sja1105_private *priv)
{
	struct sja1105_l2_lookup_entry *l2_lookup;
	struct sja1105_table *table;
	int port;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];

	/* We only populate the FDB table through dynamic L2 Address Lookup
	 * entries, except for a special entry at the end which is a catch-all
	 * for unknown multicast and will be used to control flooding domain.
	 */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	if (!priv->info->can_limit_mcast_flood)
		return 0;

	table->entries = kcalloc(1, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = 1;
	l2_lookup = table->entries;

	/* All L2 multicast addresses have an odd first octet */
	l2_lookup[0].macaddr = SJA1105_UNKNOWN_MULTICAST;
	l2_lookup[0].mask_macaddr = SJA1105_UNKNOWN_MULTICAST;
	l2_lookup[0].lockeds = true;
	l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1;

	/* Flood multicast to every port by default */
	for (port = 0; port < priv->ds->num_ports; port++)
		if (!dsa_is_unused_port(priv->ds, port))
			l2_lookup[0].destports |= BIT(port);

	return 0;
}

/* Populate the L2 Lookup Parameters Table: learning/ageing policy, plus an
 * even split of the FDB space between the ports that are in use.
 */
static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
{
	struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
		/* Learned FDB entries are forgotten after 300 seconds */
		.maxage = SJA1105_AGEING_TIME_MS(300000),
		/* All entries within a FDB bin are available for learning */
		.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
		/* And the P/Q/R/S equivalent setting: */
		.start_dynspc = 0,
		/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
		.poly = 0x97,
		/* This selects between Independent VLAN Learning (IVL) and
		 * Shared VLAN Learning (SVL)
		 */
		.shared_learn = true,
		/* Don't discard management traffic based on ENFPORT -
		 * we don't perform SMAC port enforcement anyway, so
		 * what we are setting here doesn't matter.
		 */
		.no_enf_hostprt = false,
		/* Don't learn SMAC for mac_fltres1 and mac_fltres0.
		 * Maybe correlate with no_linklocal_learn from bridge driver?
		 */
		.no_mgmt_learn = true,
		/* P/Q/R/S only */
		.use_static = true,
		/* Dynamically learned FDB entries can overwrite other (older)
		 * dynamic FDB entries
		 */
		.owr_dyn = true,
		.drpnolearn = true,
	};
	struct dsa_switch *ds = priv->ds;
	int port, num_used_ports = 0;
	struct sja1105_table *table;
	u64 max_fdb_entries;

	for (port = 0; port < ds->num_ports; port++)
		if (!dsa_is_unused_port(ds, port))
			num_used_ports++;

	/* Share the FDB space equally between the ports in use */
	max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / num_used_ports;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		default_l2_lookup_params.maxaddrp[port] = max_fdb_entries;
	}

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(table->ops->max_entry_count,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = table->ops->max_entry_count;

	/* This table only has a single entry */
	((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
				default_l2_lookup_params;

	return 0;
}

/* Set up a default VLAN for untagged traffic injected from the CPU
 * using management routes (e.g. STP, PTP) as opposed to tag_8021q.
 * All DT-defined ports are members of this VLAN, and there are no
 * restrictions on forwarding (since the CPU selects the destination).
 * Frames from this VLAN will always be transmitted as untagged, and
 * neither the bridge nor the 8021q module can create this VLAN ID.
 */
static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	struct sja1105_vlan_lookup_entry pvid = {
		.ving_mirr = 0,
		.vegr_mirr = 0,
		.vmemb_port = 0,
		.vlan_bc = 0,
		.tag_port = 0,
		.vlanid = SJA1105_DEFAULT_VLAN,
	};
	struct dsa_switch *ds = priv->ds;
	int port;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kzalloc(table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = 1;

	for (port = 0; port < ds->num_ports; port++) {
		struct sja1105_bridge_vlan *v;

		if (dsa_is_unused_port(ds, port))
			continue;

		pvid.vmemb_port |= BIT(port);
		pvid.vlan_bc |= BIT(port);
		pvid.tag_port &= ~BIT(port);

		/* Mirror the hardware entry in the driver's VLAN list */
		v = kzalloc(sizeof(*v), GFP_KERNEL);
		if (!v)
			return -ENOMEM;

		v->port = port;
		v->vid = SJA1105_DEFAULT_VLAN;
		v->untagged = true;
		if (dsa_is_cpu_port(ds, port))
			v->pvid = true;
		list_add(&v->list, &priv->dsa_8021q_vlans);
	}

	((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
	return 0;
}

/* Populate the L2 Forwarding Table: port-to-port reachability (user ports
 * talk only to the upstream port by default) plus VLAN PCP mappings.
 */
static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_entry *l2fwd;
	struct dsa_switch *ds = priv->ds;
	struct sja1105_table *table;
	int i, j;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(table->ops->max_entry_count,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = table->ops->max_entry_count;

	l2fwd = table->entries;

	/* First 5 entries define the forwarding rules */
	for (i = 0; i < ds->num_ports; i++) {
		unsigned int upstream = dsa_upstream_port(priv->ds, i);

		if (dsa_is_unused_port(ds, i))
			continue;

		for (j = 0; j < SJA1105_NUM_TC; j++)
			l2fwd[i].vlan_pmap[j] = j;

		/* All ports start up with egress flooding enabled,
		 * including the CPU port.
		 */
		priv->ucast_egress_floods |= BIT(i);
		priv->bcast_egress_floods |= BIT(i);

		if (i == upstream)
			continue;

		/* User port <-> upstream (CPU) forwarding only, by default */
		sja1105_port_allow_traffic(l2fwd, i, upstream, true);
		sja1105_port_allow_traffic(l2fwd, upstream, i, true);

		l2fwd[i].bc_domain = BIT(upstream);
		l2fwd[i].fl_domain = BIT(upstream);

		l2fwd[upstream].bc_domain |= BIT(i);
		l2fwd[upstream].fl_domain |= BIT(i);
	}
	/* Next 8 entries define VLAN PCP mapping from ingress to egress.
	 * Create a one-to-one mapping.
	 */
	for (i = 0; i < SJA1105_NUM_TC; i++) {
		for (j = 0; j < ds->num_ports; j++) {
			if (dsa_is_unused_port(ds, j))
				continue;

			l2fwd[ds->num_ports + i].vlan_pmap[j] = i;
		}
	}

	return 0;
}

/* Populate the (single-entry) L2 Forwarding Parameters Table */
static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_params_entry *l2fwd_params;
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(table->ops->max_entry_count,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = table->ops->max_entry_count;

	/* This table only has a single entry */
	l2fwd_params = table->entries;

	/* Disallow dynamic reconfiguration of vlan_pmap */
	l2fwd_params->max_dynp = 0;
	/* Use a single memory partition for all ingress queues */
	l2fwd_params->part_spc[0] = priv->info->max_frame_mem;

	return 0;
}

/* Re-split the frame buffer memory between best-effort traffic, the VLAN
 * retagging overhead and (optionally) TTEthernet virtual links. Operates on
 * the in-memory static config; caller is responsible for uploading it.
 */
void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
	struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
	int max_mem = priv->info->max_frame_mem;
	struct sja1105_table *table;

	/* VLAN retagging is implemented using a loopback port that consumes
	 * frame buffers. That leaves less for us.
	 */
	if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
		max_mem -= SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
	l2_fwd_params = table->entries;
	l2_fwd_params->part_spc[0] = max_mem;

	/* If we have any critical-traffic virtual links, we need to reserve
	 * some frame buffer memory for them. At the moment, hardcode the value
	 * at 100 blocks of 128 bytes of memory each. This leaves 829 blocks
	 * remaining for best-effort traffic. TODO: figure out a more flexible
	 * way to perform the frame buffer partitioning.
	 */
	if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count)
		return;

	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
	vl_fwd_params = table->entries;

	l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY;
	vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
}

/* Populate the (single-entry) General Parameters Table */
static int sja1105_init_general_params(struct sja1105_private *priv)
{
	struct sja1105_general_params_entry default_general_params = {
		/* Allow dynamic changing of the mirror port */
		.mirr_ptacu = true,
		.switchid = priv->ds->index,
		/* Priority queue for link-local management frames
		 * (both ingress to and egress from CPU - PTP, STP etc)
		 */
		.hostprio = 7,
		.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
		.mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
		.incl_srcpt1 = false,
		.send_meta1 = false,
		.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
		.mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
		.incl_srcpt0 = false,
		.send_meta0 = false,
		/* The destination for traffic matching mac_fltres1 and
		 * mac_fltres0 on all ports except host_port. Such traffic
		 * received on host_port itself would be dropped, except
		 * by installing a temporary 'management route'
		 */
		.host_port = priv->ds->num_ports,
		/* Default to an invalid value */
		.mirr_port = priv->ds->num_ports,
		/* Link-local traffic received on casc_port will be forwarded
		 * to host_port without embedding the source port and device ID
		 * info in the destination MAC address (presumably because it
		 * is a cascaded port and a downstream SJA switch already did
		 * that). Default to an invalid port (to disable the feature)
		 * and overwrite this if we find any DSA (cascaded) ports.
		 */
		.casc_port = priv->ds->num_ports,
		/* No TTEthernet */
		.vllupformat = SJA1105_VL_FORMAT_PSFP,
		.vlmarker = 0,
		.vlmask = 0,
		/* Only update correctionField for 1-step PTP (L2 transport) */
		.ignore2stf = 0,
		/* Forcefully disable VLAN filtering by telling
		 * the switch that VLAN has a different EtherType.
		 */
		.tpid = ETH_P_SJA1105,
		.tpid2 = ETH_P_SJA1105,
	};
	struct dsa_switch *ds = priv->ds;
	struct sja1105_table *table;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_cpu_port(ds, port)) {
			default_general_params.host_port = port;
			break;
		}
	}

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(table->ops->max_entry_count,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = table->ops->max_entry_count;

	/* This table only has a single entry */
	((struct sja1105_general_params_entry *)table->entries)[0] =
				default_general_params;

	return 0;
}

/* Populate the (single-entry) AVB Parameters Table */
static int sja1105_init_avb_params(struct sja1105_private *priv)
{
	struct sja1105_avb_params_entry *avb;
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];

	/* Discard previous AVB Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(table->ops->max_entry_count,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = table->ops->max_entry_count;

	avb = table->entries;

	/* Configure the MAC addresses for meta frames */
	avb->destmeta = SJA1105_META_DMAC;
	avb->srcmeta = SJA1105_META_SMAC;
	/* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by
	 * default. This is because there might be boards with a hardware
	 * layout where enabling the pin as output might cause an electrical
	 * clash. On E/T the pin is always an output, which the board designers
	 * probably already knew, so even if there are going to be electrical
	 * issues, there's nothing we can do.
	 */
	avb->cas_master = false;

	return 0;
}

/* The L2 policing table is 2-stage. The table is looked up for each frame
 * according to the ingress port, whether it was broadcast or not, and the
 * classified traffic class (given by VLAN PCP). This portion of the lookup is
 * fixed, and gives access to the SHARINDX, an indirection register pointing
 * within the policing table itself, which is used to resolve the policer that
 * will be used for this frame.
 *
 *  Stage 1                              Stage 2
 * +------------+--------+              +---------------------------------+
 * |Port 0 TC 0 |SHARINDX|              | Policer 0: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 * |Port 0 TC 1 |SHARINDX|              | Policer 1: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 *    ...                               | Policer 2: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 * |Port 0 TC 7 |SHARINDX|              | Policer 3: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 * |Port 1 TC 0 |SHARINDX|              | Policer 4: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 *    ...                               | Policer 5: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 * |Port 1 TC 7 |SHARINDX|              | Policer 6: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 *    ...                               | Policer 7: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 * |Port 4 TC 7 |SHARINDX|                 ...
673 * +------------+--------+ 674 * |Port 0 BCAST|SHARINDX| ... 675 * +------------+--------+ 676 * |Port 1 BCAST|SHARINDX| ... 677 * +------------+--------+ 678 * ... ... 679 * +------------+--------+ +---------------------------------+ 680 * |Port 4 BCAST|SHARINDX| | Policer 44: Rate, Burst, MTU | 681 * +------------+--------+ +---------------------------------+ 682 * 683 * In this driver, we shall use policers 0-4 as statically alocated port 684 * (matchall) policers. So we need to make the SHARINDX for all lookups 685 * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast 686 * lookup) equal. 687 * The remaining policers (40) shall be dynamically allocated for flower 688 * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff. 689 */ 690 #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000) 691 692 static int sja1105_init_l2_policing(struct sja1105_private *priv) 693 { 694 struct sja1105_l2_policing_entry *policing; 695 struct dsa_switch *ds = priv->ds; 696 struct sja1105_table *table; 697 int port, tc; 698 699 table = &priv->static_config.tables[BLK_IDX_L2_POLICING]; 700 701 /* Discard previous L2 Policing Table */ 702 if (table->entry_count) { 703 kfree(table->entries); 704 table->entry_count = 0; 705 } 706 707 table->entries = kcalloc(table->ops->max_entry_count, 708 table->ops->unpacked_entry_size, GFP_KERNEL); 709 if (!table->entries) 710 return -ENOMEM; 711 712 table->entry_count = table->ops->max_entry_count; 713 714 policing = table->entries; 715 716 /* Setup shared indices for the matchall policers */ 717 for (port = 0; port < ds->num_ports; port++) { 718 int mcast = (ds->num_ports * (SJA1105_NUM_TC + 1)) + port; 719 int bcast = (ds->num_ports * SJA1105_NUM_TC) + port; 720 721 for (tc = 0; tc < SJA1105_NUM_TC; tc++) 722 policing[port * SJA1105_NUM_TC + tc].sharindx = port; 723 724 policing[bcast].sharindx = port; 725 /* Only SJA1110 has multicast policers */ 726 if (mcast <= table->ops->max_entry_count) 727 
policing[mcast].sharindx = port; 728 } 729 730 /* Setup the matchall policer parameters */ 731 for (port = 0; port < ds->num_ports; port++) { 732 int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; 733 734 if (dsa_is_cpu_port(priv->ds, port)) 735 mtu += VLAN_HLEN; 736 737 policing[port].smax = 65535; /* Burst size in bytes */ 738 policing[port].rate = SJA1105_RATE_MBPS(1000); 739 policing[port].maxlen = mtu; 740 policing[port].partition = 0; 741 } 742 743 return 0; 744 } 745 746 static int sja1105_static_config_load(struct sja1105_private *priv, 747 struct sja1105_dt_port *ports) 748 { 749 int rc; 750 751 sja1105_static_config_free(&priv->static_config); 752 rc = sja1105_static_config_init(&priv->static_config, 753 priv->info->static_ops, 754 priv->info->device_id); 755 if (rc) 756 return rc; 757 758 /* Build static configuration */ 759 rc = sja1105_init_mac_settings(priv); 760 if (rc < 0) 761 return rc; 762 rc = sja1105_init_mii_settings(priv, ports); 763 if (rc < 0) 764 return rc; 765 rc = sja1105_init_static_fdb(priv); 766 if (rc < 0) 767 return rc; 768 rc = sja1105_init_static_vlan(priv); 769 if (rc < 0) 770 return rc; 771 rc = sja1105_init_l2_lookup_params(priv); 772 if (rc < 0) 773 return rc; 774 rc = sja1105_init_l2_forwarding(priv); 775 if (rc < 0) 776 return rc; 777 rc = sja1105_init_l2_forwarding_params(priv); 778 if (rc < 0) 779 return rc; 780 rc = sja1105_init_l2_policing(priv); 781 if (rc < 0) 782 return rc; 783 rc = sja1105_init_general_params(priv); 784 if (rc < 0) 785 return rc; 786 rc = sja1105_init_avb_params(priv); 787 if (rc < 0) 788 return rc; 789 790 /* Send initial configuration to hardware via SPI */ 791 return sja1105_static_config_upload(priv); 792 } 793 794 static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, 795 const struct sja1105_dt_port *ports) 796 { 797 struct dsa_switch *ds = priv->ds; 798 int i; 799 800 for (i = 0; i < ds->num_ports; i++) { 801 if (ports[i].role == XMII_MAC) 802 continue; 803 804 if (ports[i].phy_mode == 
PHY_INTERFACE_MODE_RGMII_RXID || 805 ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID) 806 priv->rgmii_rx_delay[i] = true; 807 808 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID || 809 ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID) 810 priv->rgmii_tx_delay[i] = true; 811 812 if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) && 813 !priv->info->setup_rgmii_delay) 814 return -EINVAL; 815 } 816 return 0; 817 } 818 819 static int sja1105_parse_ports_node(struct sja1105_private *priv, 820 struct sja1105_dt_port *ports, 821 struct device_node *ports_node) 822 { 823 struct device *dev = &priv->spidev->dev; 824 struct device_node *child; 825 826 for_each_available_child_of_node(ports_node, child) { 827 struct device_node *phy_node; 828 phy_interface_t phy_mode; 829 u32 index; 830 int err; 831 832 /* Get switch port number from DT */ 833 if (of_property_read_u32(child, "reg", &index) < 0) { 834 dev_err(dev, "Port number not defined in device tree " 835 "(property \"reg\")\n"); 836 of_node_put(child); 837 return -ENODEV; 838 } 839 840 /* Get PHY mode from DT */ 841 err = of_get_phy_mode(child, &phy_mode); 842 if (err) { 843 dev_err(dev, "Failed to read phy-mode or " 844 "phy-interface-type property for port %d\n", 845 index); 846 of_node_put(child); 847 return -ENODEV; 848 } 849 ports[index].phy_mode = phy_mode; 850 851 phy_node = of_parse_phandle(child, "phy-handle", 0); 852 if (!phy_node) { 853 if (!of_phy_is_fixed_link(child)) { 854 dev_err(dev, "phy-handle or fixed-link " 855 "properties missing!\n"); 856 of_node_put(child); 857 return -ENODEV; 858 } 859 /* phy-handle is missing, but fixed-link isn't. 860 * So it's a fixed link. Default to PHY role. 
861 */ 862 ports[index].role = XMII_PHY; 863 } else { 864 /* phy-handle present => put port in MAC role */ 865 ports[index].role = XMII_MAC; 866 of_node_put(phy_node); 867 } 868 869 /* The MAC/PHY role can be overridden with explicit bindings */ 870 if (of_property_read_bool(child, "sja1105,role-mac")) 871 ports[index].role = XMII_MAC; 872 else if (of_property_read_bool(child, "sja1105,role-phy")) 873 ports[index].role = XMII_PHY; 874 } 875 876 return 0; 877 } 878 879 static int sja1105_parse_dt(struct sja1105_private *priv, 880 struct sja1105_dt_port *ports) 881 { 882 struct device *dev = &priv->spidev->dev; 883 struct device_node *switch_node = dev->of_node; 884 struct device_node *ports_node; 885 int rc; 886 887 ports_node = of_get_child_by_name(switch_node, "ports"); 888 if (!ports_node) { 889 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n"); 890 return -ENODEV; 891 } 892 893 rc = sja1105_parse_ports_node(priv, ports, ports_node); 894 of_node_put(ports_node); 895 896 return rc; 897 } 898 899 static int sja1105_sgmii_read(struct sja1105_private *priv, int pcs_reg) 900 { 901 const struct sja1105_regs *regs = priv->info->regs; 902 u32 val; 903 int rc; 904 905 rc = sja1105_xfer_u32(priv, SPI_READ, regs->sgmii + pcs_reg, &val, 906 NULL); 907 if (rc < 0) 908 return rc; 909 910 return val; 911 } 912 913 static int sja1105_sgmii_write(struct sja1105_private *priv, int pcs_reg, 914 u16 pcs_val) 915 { 916 const struct sja1105_regs *regs = priv->info->regs; 917 u32 val = pcs_val; 918 int rc; 919 920 rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->sgmii + pcs_reg, &val, 921 NULL); 922 if (rc < 0) 923 return rc; 924 925 return val; 926 } 927 928 static void sja1105_sgmii_pcs_config(struct sja1105_private *priv, 929 bool an_enabled, bool an_master) 930 { 931 u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII; 932 933 /* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to 934 * stop the clock during LPI mode, make the MAC reconfigure 935 * autonomously after PCS 
autoneg is done, flush the internal FIFOs. 936 */ 937 sja1105_sgmii_write(priv, SJA1105_DC1, SJA1105_DC1_EN_VSMMD1 | 938 SJA1105_DC1_CLOCK_STOP_EN | 939 SJA1105_DC1_MAC_AUTO_SW | 940 SJA1105_DC1_INIT); 941 /* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */ 942 sja1105_sgmii_write(priv, SJA1105_DC2, SJA1105_DC2_TX_POL_INV_DISABLE); 943 /* AUTONEG_CONTROL: Use SGMII autoneg */ 944 if (an_master) 945 ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK; 946 sja1105_sgmii_write(priv, SJA1105_AC, ac); 947 /* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise, 948 * sja1105_sgmii_pcs_force_speed must be called later for the link 949 * to become operational. 950 */ 951 if (an_enabled) 952 sja1105_sgmii_write(priv, MII_BMCR, 953 BMCR_ANENABLE | BMCR_ANRESTART); 954 } 955 956 static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv, 957 int speed) 958 { 959 int pcs_speed; 960 961 switch (speed) { 962 case SPEED_1000: 963 pcs_speed = BMCR_SPEED1000; 964 break; 965 case SPEED_100: 966 pcs_speed = BMCR_SPEED100; 967 break; 968 case SPEED_10: 969 pcs_speed = BMCR_SPEED10; 970 break; 971 default: 972 dev_err(priv->ds->dev, "Invalid speed %d\n", speed); 973 return; 974 } 975 sja1105_sgmii_write(priv, MII_BMCR, pcs_speed | BMCR_FULLDPLX); 976 } 977 978 /* Convert link speed from SJA1105 to ethtool encoding */ 979 static int sja1105_speed[] = { 980 [SJA1105_SPEED_AUTO] = SPEED_UNKNOWN, 981 [SJA1105_SPEED_10MBPS] = SPEED_10, 982 [SJA1105_SPEED_100MBPS] = SPEED_100, 983 [SJA1105_SPEED_1000MBPS] = SPEED_1000, 984 }; 985 986 /* Set link speed in the MAC configuration for a specific port. 
 */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
				      int speed_mbps)
{
	struct sja1105_xmii_params_entry *mii;
	struct sja1105_mac_config_entry *mac;
	struct device *dev = priv->ds->dev;
	sja1105_phy_interface_t phy_mode;
	sja1105_speed_t speed;
	int rc;

	/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
	 * tables. On E/T, MAC reconfig tables are not readable, only writable.
	 * We have to *know* what the MAC looks like. For the sake of keeping
	 * the code common, we'll use the static configuration tables as a
	 * reasonable approximation for both E/T and P/Q/R/S.
	 */
	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	switch (speed_mbps) {
	case SPEED_UNKNOWN:
		/* PHYLINK called sja1105_mac_config() to inform us about
		 * the state->interface, but AN has not completed and the
		 * speed is not yet valid. UM10944.pdf says that setting
		 * SJA1105_SPEED_AUTO at runtime disables the port, so that is
		 * ok for power consumption in case AN will never complete -
		 * otherwise PHYLINK should come back with a new update.
		 */
		speed = SJA1105_SPEED_AUTO;
		break;
	case SPEED_10:
		speed = SJA1105_SPEED_10MBPS;
		break;
	case SPEED_100:
		speed = SJA1105_SPEED_100MBPS;
		break;
	case SPEED_1000:
		speed = SJA1105_SPEED_1000MBPS;
		break;
	default:
		dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
		return -EINVAL;
	}

	/* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
	 * table, since this will be used for the clocking setup, and we no
	 * longer need to store it in the static config (already told hardware
	 * we want auto during upload phase).
	 * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
	 * we need to configure the PCS only (if even that).
	 */
	if (sja1105_supports_sgmii(priv, port))
		mac[port].speed = SJA1105_SPEED_1000MBPS;
	else
		mac[port].speed = speed;

	/* Write to the dynamic reconfiguration tables */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					  &mac[port], true);
	if (rc < 0) {
		dev_err(dev, "Failed to write MAC config: %d\n", rc);
		return rc;
	}

	/* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
	 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
	 * RMII no change of the clock setup is required. Actually, changing
	 * the clock setup does interrupt the clock signal for a certain time
	 * which causes trouble for all PHYs relying on this signal.
	 */
	phy_mode = mii->xmii_mode[port];
	if (phy_mode != XMII_MODE_RGMII)
		return 0;

	return sja1105_clocking_setup_port(priv, port);
}

/* The SJA1105 MAC programming model is through the static config (the xMII
 * Mode table cannot be dynamically reconfigured), and we have to program
 * that early (earlier than PHYLINK calls us, anyway).
 * So just error out in case the connected PHY attempts to change the initial
 * system interface MII protocol from what is defined in the DT, at least for
 * now.
1070 */ 1071 static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port, 1072 phy_interface_t interface) 1073 { 1074 struct sja1105_xmii_params_entry *mii; 1075 sja1105_phy_interface_t phy_mode; 1076 1077 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; 1078 phy_mode = mii->xmii_mode[port]; 1079 1080 switch (interface) { 1081 case PHY_INTERFACE_MODE_MII: 1082 return (phy_mode != XMII_MODE_MII); 1083 case PHY_INTERFACE_MODE_RMII: 1084 return (phy_mode != XMII_MODE_RMII); 1085 case PHY_INTERFACE_MODE_RGMII: 1086 case PHY_INTERFACE_MODE_RGMII_ID: 1087 case PHY_INTERFACE_MODE_RGMII_RXID: 1088 case PHY_INTERFACE_MODE_RGMII_TXID: 1089 return (phy_mode != XMII_MODE_RGMII); 1090 case PHY_INTERFACE_MODE_SGMII: 1091 return (phy_mode != XMII_MODE_SGMII); 1092 default: 1093 return true; 1094 } 1095 } 1096 1097 static void sja1105_mac_config(struct dsa_switch *ds, int port, 1098 unsigned int mode, 1099 const struct phylink_link_state *state) 1100 { 1101 struct sja1105_private *priv = ds->priv; 1102 bool is_sgmii = sja1105_supports_sgmii(priv, port); 1103 1104 if (sja1105_phy_mode_mismatch(priv, port, state->interface)) { 1105 dev_err(ds->dev, "Changing PHY mode to %s not supported!\n", 1106 phy_modes(state->interface)); 1107 return; 1108 } 1109 1110 if (phylink_autoneg_inband(mode) && !is_sgmii) { 1111 dev_err(ds->dev, "In-band AN not supported!\n"); 1112 return; 1113 } 1114 1115 if (is_sgmii) 1116 sja1105_sgmii_pcs_config(priv, phylink_autoneg_inband(mode), 1117 false); 1118 } 1119 1120 static void sja1105_mac_link_down(struct dsa_switch *ds, int port, 1121 unsigned int mode, 1122 phy_interface_t interface) 1123 { 1124 sja1105_inhibit_tx(ds->priv, BIT(port), true); 1125 } 1126 1127 static void sja1105_mac_link_up(struct dsa_switch *ds, int port, 1128 unsigned int mode, 1129 phy_interface_t interface, 1130 struct phy_device *phydev, 1131 int speed, int duplex, 1132 bool tx_pause, bool rx_pause) 1133 { 1134 struct sja1105_private *priv = ds->priv; 

	sja1105_adjust_port_config(priv, port, speed);

	if (sja1105_supports_sgmii(priv, port) && !phylink_autoneg_inband(mode))
		sja1105_sgmii_pcs_force_speed(priv, speed);

	/* Re-enable egress now that the link is up (inhibited on link down) */
	sja1105_inhibit_tx(priv, BIT(port), false);
}

static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
				     unsigned long *supported,
				     struct phylink_link_state *state)
{
	/* Construct a new mask which exhaustively contains all link features
	 * supported by the MAC, and then apply that (logical AND) to what will
	 * be sent to the PHY for "marketing".
	 */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct sja1105_private *priv = ds->priv;
	struct sja1105_xmii_params_entry *mii;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	/* include/linux/phylink.h says:
	 * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
	 * expects the MAC driver to return all supported link modes.
	 */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    sja1105_phy_mode_mismatch(priv, port, state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	/* The MAC does not support pause frames, and also doesn't
	 * support half-duplex traffic modes.
	 */
	phylink_set(mask, Autoneg);
	phylink_set(mask, MII);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Full);
	phylink_set(mask, 100baseT1_Full);
	/* Only the gigabit-capable xMII protocols advertise 1000baseT */
	if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
	    mii->xmii_mode[port] == XMII_MODE_SGMII)
		phylink_set(mask, 1000baseT_Full);

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
				     struct phylink_link_state *state)
{
	struct sja1105_private *priv = ds->priv;
	int ais;

	/* Read the vendor-specific AUTONEG_INTR_STATUS register */
	ais = sja1105_sgmii_read(priv, SJA1105_AIS);
	if (ais < 0)
		return ais;

	switch (SJA1105_AIS_SPEED(ais)) {
	case 0:
		state->speed = SPEED_10;
		break;
	case 1:
		state->speed = SPEED_100;
		break;
	case 2:
		state->speed = SPEED_1000;
		break;
	default:
		/* NOTE(review): state->speed is left untouched here while
		 * duplex/link below are still filled in - confirm whether
		 * SPEED_UNKNOWN should be assigned in this branch.
		 */
		dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
			SJA1105_AIS_SPEED(ais));
	}
	state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
	state->an_complete = SJA1105_AIS_COMPLETE(ais);
	state->link = SJA1105_AIS_LINK_STATUS(ais);

	return 0;
}

static int
sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
			      const struct sja1105_l2_lookup_entry *requested)
{
	struct sja1105_l2_lookup_entry *l2_lookup;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
	l2_lookup = table->entries;

	/* Linear scan of the static config backup for a {MAC, VID} match
	 * whose destination mask includes this port.
	 */
	for (i = 0; i < table->entry_count; i++)
		if (l2_lookup[i].macaddr == requested->macaddr &&
		    l2_lookup[i].vlanid == requested->vlanid &&
		    l2_lookup[i].destports & BIT(port))
			return i;

	return -1;
}

/* We want FDB entries added statically through
the bridge command to persist
 * across switch resets, which are a common thing during normal SJA1105
 * operation. So we have to back them up in the static configuration tables
 * and hence apply them on next static config upload... yay!
 */
static int
sja1105_static_fdb_change(struct sja1105_private *priv, int port,
			  const struct sja1105_l2_lookup_entry *requested,
			  bool keep)
{
	struct sja1105_l2_lookup_entry *l2_lookup;
	struct sja1105_table *table;
	int slot, rc;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];

	slot = sja1105_find_static_fdb_entry(priv, port, requested);

	/* No pre-existing backup of this entry in the static config */
	if (slot < 0) {
		/* Deleting something that was never backed up: done */
		if (!keep)
			return 0;

		/* Adding: grow the table by one and take the new tail slot */
		rc = sja1105_table_resize(table, table->entry_count + 1);
		if (rc)
			return rc;

		slot = table->entry_count - 1;
	}

	/* Fetch the entry array only after the resize, since the resize may
	 * have moved the backing memory.
	 */
	l2_lookup = table->entries;

	if (keep) {
		/* Install (or refresh) the requested entry in place; even a
		 * pre-existing match may need its port forwarding mask
		 * updated.
		 */
		l2_lookup[slot] = *requested;
		return 0;
	}

	/* Delete by moving the last element into the vacated slot and
	 * shrinking the array by one.
	 */
	l2_lookup[slot] = l2_lookup[table->entry_count - 1];

	return sja1105_table_resize(table, table->entry_count - 1);
}

/* First-generation switches have a 4-way set associative TCAM that
 * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
1291 * For the placement of a newly learnt FDB entry, the switch selects the bin 1292 * based on a hash function, and the way within that bin incrementally. 1293 */ 1294 static int sja1105et_fdb_index(int bin, int way) 1295 { 1296 return bin * SJA1105ET_FDB_BIN_SIZE + way; 1297 } 1298 1299 static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin, 1300 const u8 *addr, u16 vid, 1301 struct sja1105_l2_lookup_entry *match, 1302 int *last_unused) 1303 { 1304 int way; 1305 1306 for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) { 1307 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1308 int index = sja1105et_fdb_index(bin, way); 1309 1310 /* Skip unused entries, optionally marking them 1311 * into the return value 1312 */ 1313 if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1314 index, &l2_lookup)) { 1315 if (last_unused) 1316 *last_unused = way; 1317 continue; 1318 } 1319 1320 if (l2_lookup.macaddr == ether_addr_to_u64(addr) && 1321 l2_lookup.vlanid == vid) { 1322 if (match) 1323 *match = l2_lookup; 1324 return way; 1325 } 1326 } 1327 /* Return an invalid entry index if not found */ 1328 return -1; 1329 } 1330 1331 int sja1105et_fdb_add(struct dsa_switch *ds, int port, 1332 const unsigned char *addr, u16 vid) 1333 { 1334 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1335 struct sja1105_private *priv = ds->priv; 1336 struct device *dev = ds->dev; 1337 int last_unused = -1; 1338 int bin, way, rc; 1339 1340 bin = sja1105et_fdb_hash(priv, addr, vid); 1341 1342 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid, 1343 &l2_lookup, &last_unused); 1344 if (way >= 0) { 1345 /* We have an FDB entry. Is our port in the destination 1346 * mask? If yes, we need to do nothing. If not, we need 1347 * to rewrite the entry by adding this port to it. 
1348 */ 1349 if (l2_lookup.destports & BIT(port)) 1350 return 0; 1351 l2_lookup.destports |= BIT(port); 1352 } else { 1353 int index = sja1105et_fdb_index(bin, way); 1354 1355 /* We don't have an FDB entry. We construct a new one and 1356 * try to find a place for it within the FDB table. 1357 */ 1358 l2_lookup.macaddr = ether_addr_to_u64(addr); 1359 l2_lookup.destports = BIT(port); 1360 l2_lookup.vlanid = vid; 1361 1362 if (last_unused >= 0) { 1363 way = last_unused; 1364 } else { 1365 /* Bin is full, need to evict somebody. 1366 * Choose victim at random. If you get these messages 1367 * often, you may need to consider changing the 1368 * distribution function: 1369 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly 1370 */ 1371 get_random_bytes(&way, sizeof(u8)); 1372 way %= SJA1105ET_FDB_BIN_SIZE; 1373 dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n", 1374 bin, addr, way); 1375 /* Evict entry */ 1376 sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1377 index, NULL, false); 1378 } 1379 } 1380 l2_lookup.index = sja1105et_fdb_index(bin, way); 1381 1382 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1383 l2_lookup.index, &l2_lookup, 1384 true); 1385 if (rc < 0) 1386 return rc; 1387 1388 return sja1105_static_fdb_change(priv, port, &l2_lookup, true); 1389 } 1390 1391 int sja1105et_fdb_del(struct dsa_switch *ds, int port, 1392 const unsigned char *addr, u16 vid) 1393 { 1394 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1395 struct sja1105_private *priv = ds->priv; 1396 int index, bin, way, rc; 1397 bool keep; 1398 1399 bin = sja1105et_fdb_hash(priv, addr, vid); 1400 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid, 1401 &l2_lookup, NULL); 1402 if (way < 0) 1403 return 0; 1404 index = sja1105et_fdb_index(bin, way); 1405 1406 /* We have an FDB entry. Is our port in the destination mask? If yes, 1407 * we need to remove it. 
If the resulting port mask becomes empty, we
	 * need to completely evict the FDB entry.
	 * Otherwise we just write it back.
	 */
	l2_lookup.destports &= ~BIT(port);

	/* Keep the entry only if another port still references it */
	if (l2_lookup.destports)
		keep = true;
	else
		keep = false;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					  index, &l2_lookup, keep);
	if (rc < 0)
		return rc;

	/* Mirror the change into the static config backup as well */
	return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
}

int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	int rc, i;

	/* Search for an existing entry in the FDB table */
	l2_lookup.macaddr = ether_addr_to_u64(addr);
	l2_lookup.vlanid = vid;
	l2_lookup.iotag = SJA1105_S_TAG;
	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
		/* Only match on VID and IOTAG when the switch is VLAN-aware */
		l2_lookup.mask_vlanid = VLAN_VID_MASK;
		l2_lookup.mask_iotag = BIT(0);
	} else {
		l2_lookup.mask_vlanid = 0;
		l2_lookup.mask_iotag = 0;
	}
	l2_lookup.destports = BIT(port);

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					 SJA1105_SEARCH, &l2_lookup);
	if (rc == 0) {
		/* Found and this port is already in the entry's
		 * port mask => job done
		 */
		if (l2_lookup.destports & BIT(port))
			return 0;
		/* l2_lookup.index is populated by the switch in case it
		 * found something.
		 */
		l2_lookup.destports |= BIT(port);
		goto skip_finding_an_index;
	}

	/* Not found, so try to find an unused spot in the FDB.
	 * This is slightly inefficient because the strategy is knock-knock at
	 * every possible position from 0 to 1023.
	 */
	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						 i, NULL);
		if (rc < 0)
			break;
	}
	if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
		/* NOTE(review): -ENOSPC would describe a full FDB more
		 * precisely than -EINVAL - confirm callers before changing.
		 */
		dev_err(ds->dev, "FDB is full, cannot add entry.\n");
		return -EINVAL;
	}
	l2_lookup.lockeds = true;
	l2_lookup.index = i;

skip_finding_an_index:
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					  l2_lookup.index, &l2_lookup,
					  true);
	if (rc < 0)
		return rc;

	return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
}

int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	bool keep;
	int rc;

	l2_lookup.macaddr = ether_addr_to_u64(addr);
	l2_lookup.vlanid = vid;
	l2_lookup.iotag = SJA1105_S_TAG;
	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
		l2_lookup.mask_vlanid = VLAN_VID_MASK;
		l2_lookup.mask_iotag = BIT(0);
	} else {
		l2_lookup.mask_vlanid = 0;
		l2_lookup.mask_iotag = 0;
	}
	l2_lookup.destports = BIT(port);

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					 SJA1105_SEARCH, &l2_lookup);
	if (rc < 0)
		return 0;

	l2_lookup.destports &= ~BIT(port);

	/* Decide whether we remove just this port from the FDB entry,
	 * or if we remove it completely.
1519 */ 1520 if (l2_lookup.destports) 1521 keep = true; 1522 else 1523 keep = false; 1524 1525 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1526 l2_lookup.index, &l2_lookup, keep); 1527 if (rc < 0) 1528 return rc; 1529 1530 return sja1105_static_fdb_change(priv, port, &l2_lookup, keep); 1531 } 1532 1533 static int sja1105_fdb_add(struct dsa_switch *ds, int port, 1534 const unsigned char *addr, u16 vid) 1535 { 1536 struct sja1105_private *priv = ds->priv; 1537 1538 /* dsa_8021q is in effect when the bridge's vlan_filtering isn't, 1539 * so the switch still does some VLAN processing internally. 1540 * But Shared VLAN Learning (SVL) is also active, and it will take 1541 * care of autonomous forwarding between the unique pvid's of each 1542 * port. Here we just make sure that users can't add duplicate FDB 1543 * entries when in this mode - the actual VID doesn't matter except 1544 * for what gets printed in 'bridge fdb show'. In the case of zero, 1545 * no VID gets printed at all. 
1546 */ 1547 if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL) 1548 vid = 0; 1549 1550 return priv->info->fdb_add_cmd(ds, port, addr, vid); 1551 } 1552 1553 static int sja1105_fdb_del(struct dsa_switch *ds, int port, 1554 const unsigned char *addr, u16 vid) 1555 { 1556 struct sja1105_private *priv = ds->priv; 1557 1558 if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL) 1559 vid = 0; 1560 1561 return priv->info->fdb_del_cmd(ds, port, addr, vid); 1562 } 1563 1564 static int sja1105_fdb_dump(struct dsa_switch *ds, int port, 1565 dsa_fdb_dump_cb_t *cb, void *data) 1566 { 1567 struct sja1105_private *priv = ds->priv; 1568 struct device *dev = ds->dev; 1569 int i; 1570 1571 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { 1572 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1573 u8 macaddr[ETH_ALEN]; 1574 int rc; 1575 1576 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1577 i, &l2_lookup); 1578 /* No fdb entry at i, not an issue */ 1579 if (rc == -ENOENT) 1580 continue; 1581 if (rc) { 1582 dev_err(dev, "Failed to dump FDB: %d\n", rc); 1583 return rc; 1584 } 1585 1586 /* FDB dump callback is per port. This means we have to 1587 * disregard a valid entry if it's not for this port, even if 1588 * only to revisit it later. This is inefficient because the 1589 * 1024-sized FDB table needs to be traversed 4 times through 1590 * SPI during a 'bridge fdb show' command. 1591 */ 1592 if (!(l2_lookup.destports & BIT(port))) 1593 continue; 1594 1595 /* We need to hide the FDB entry for unknown multicast */ 1596 if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST && 1597 l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST) 1598 continue; 1599 1600 u64_to_ether_addr(l2_lookup.macaddr, macaddr); 1601 1602 /* We need to hide the dsa_8021q VLANs from the user. 
 */
		if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
			l2_lookup.vlanid = 0;
		cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
	}
	return 0;
}

static int sja1105_mdb_add(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_mdb *mdb)
{
	/* Multicast entries reuse the unicast FDB plumbing */
	return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
}

static int sja1105_mdb_del(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_mdb *mdb)
{
	return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
}

/* Common function for unicast and broadcast flood configuration.
 * Flooding is configured between each {ingress, egress} port pair, and since
 * the bridge's semantics are those of "egress flooding", it means we must
 * enable flooding towards this port from all ingress ports that are in the
 * same forwarding domain.
 */
static int sja1105_manage_flood_domains(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_entry *l2_fwd;
	struct dsa_switch *ds = priv->ds;
	int from, to, rc;

	l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;

	for (from = 0; from < ds->num_ports; from++) {
		u64 fl_domain = 0, bc_domain = 0;

		/* Collect the egress ports reachable from 'from' which have
		 * unicast/broadcast flooding enabled.
		 */
		for (to = 0; to < priv->ds->num_ports; to++) {
			if (!sja1105_can_forward(l2_fwd, from, to))
				continue;

			if (priv->ucast_egress_floods & BIT(to))
				fl_domain |= BIT(to);
			if (priv->bcast_egress_floods & BIT(to))
				bc_domain |= BIT(to);
		}

		/* Nothing changed, nothing to do */
		if (l2_fwd[from].fl_domain == fl_domain &&
		    l2_fwd[from].bc_domain == bc_domain)
			continue;

		l2_fwd[from].fl_domain = fl_domain;
		l2_fwd[from].bc_domain = bc_domain;

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
						  from, &l2_fwd[from], true);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static int
sja1105_bridge_member(struct dsa_switch *ds, int port,
		      struct net_device *br, bool member)
{
	struct sja1105_l2_forwarding_entry *l2_fwd;
	struct sja1105_private *priv = ds->priv;
	int i, rc;

	l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;

	for (i = 0; i < ds->num_ports; i++) {
		/* Add this port to the forwarding matrix of the
		 * other ports in the same bridge, and viceversa.
		 */
		if (!dsa_is_user_port(ds, i))
			continue;
		/* For the ports already under the bridge, only one thing needs
		 * to be done, and that is to add this port to their
		 * reachability domain. So we can perform the SPI write for
		 * them immediately. However, for this port itself (the one
		 * that is new to the bridge), we need to add all other ports
		 * to its reachability domain. So we do that incrementally in
		 * this loop, and perform the SPI write only at the end, once
		 * the domain contains all other bridge ports.
		 */
		if (i == port)
			continue;
		if (dsa_to_port(ds, i)->bridge_dev != br)
			continue;
		sja1105_port_allow_traffic(l2_fwd, i, port, member);
		sja1105_port_allow_traffic(l2_fwd, port, i, member);

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
						  i, &l2_fwd[i], true);
		if (rc < 0)
			return rc;
	}

	/* Now commit this port's own (fully accumulated) forwarding entry */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
					  port, &l2_fwd[port], true);
	if (rc)
		return rc;

	/* The flood domains track the forwarding matrix; refresh them too */
	return sja1105_manage_flood_domains(priv);
}

static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
					 u8 state)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_mac_config_entry *mac;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		/* From UM10944 description of DRPDTAG (why put this there?):
		 * "Management traffic flows to the port regardless of the state
		 * of the INGRESS flag". So BPDUs are still be allowed to pass.
		 * At the moment no difference between DISABLED and BLOCKING.
1726 */ 1727 mac[port].ingress = false; 1728 mac[port].egress = false; 1729 mac[port].dyn_learn = false; 1730 break; 1731 case BR_STATE_LISTENING: 1732 mac[port].ingress = true; 1733 mac[port].egress = false; 1734 mac[port].dyn_learn = false; 1735 break; 1736 case BR_STATE_LEARNING: 1737 mac[port].ingress = true; 1738 mac[port].egress = false; 1739 mac[port].dyn_learn = !!(priv->learn_ena & BIT(port)); 1740 break; 1741 case BR_STATE_FORWARDING: 1742 mac[port].ingress = true; 1743 mac[port].egress = true; 1744 mac[port].dyn_learn = !!(priv->learn_ena & BIT(port)); 1745 break; 1746 default: 1747 dev_err(ds->dev, "invalid STP state: %d\n", state); 1748 return; 1749 } 1750 1751 sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, 1752 &mac[port], true); 1753 } 1754 1755 static int sja1105_bridge_join(struct dsa_switch *ds, int port, 1756 struct net_device *br) 1757 { 1758 return sja1105_bridge_member(ds, port, br, true); 1759 } 1760 1761 static void sja1105_bridge_leave(struct dsa_switch *ds, int port, 1762 struct net_device *br) 1763 { 1764 sja1105_bridge_member(ds, port, br, false); 1765 } 1766 1767 #define BYTES_PER_KBIT (1000LL / 8) 1768 1769 static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv) 1770 { 1771 int i; 1772 1773 for (i = 0; i < priv->info->num_cbs_shapers; i++) 1774 if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope) 1775 return i; 1776 1777 return -1; 1778 } 1779 1780 static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port, 1781 int prio) 1782 { 1783 int i; 1784 1785 for (i = 0; i < priv->info->num_cbs_shapers; i++) { 1786 struct sja1105_cbs_entry *cbs = &priv->cbs[i]; 1787 1788 if (cbs->port == port && cbs->prio == prio) { 1789 memset(cbs, 0, sizeof(*cbs)); 1790 return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, 1791 i, cbs, true); 1792 } 1793 } 1794 1795 return 0; 1796 } 1797 1798 static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port, 1799 struct tc_cbs_qopt_offload *offload) 1800 { 
	struct sja1105_private *priv = ds->priv;
	struct sja1105_cbs_entry *cbs;
	int index;

	/* Disabling the offload means freeing this port/queue's shaper */
	if (!offload->enable)
		return sja1105_delete_cbs_shaper(priv, port, offload->queue);

	index = sja1105_find_unused_cbs_shaper(priv);
	if (index < 0)
		return -ENOSPC;

	cbs = &priv->cbs[index];
	cbs->port = port;
	cbs->prio = offload->queue;
	/* locredit and sendslope are negative by definition. In hardware,
	 * positive values must be provided, and the negative sign is implicit.
	 */
	cbs->credit_hi = offload->hicredit;
	cbs->credit_lo = abs(offload->locredit);
	/* User space is in kbits/sec, hardware in bytes/sec */
	cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
	cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
	/* Convert the negative values from 64-bit 2's complement
	 * to 32-bit 2's complement (for the case of 0x80000000 whose
	 * negative is still negative).
	 */
	cbs->credit_lo &= GENMASK_ULL(31, 0);
	cbs->send_slope &= GENMASK_ULL(31, 0);

	return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, index, cbs,
					    true);
}

/* Re-program every active CBS shaper, e.g. after a switch reset */
static int sja1105_reload_cbs(struct sja1105_private *priv)
{
	int rc = 0, i;

	for (i = 0; i < priv->info->num_cbs_shapers; i++) {
		struct sja1105_cbs_entry *cbs = &priv->cbs[i];

		/* Skip unused shaper slots */
		if (!cbs->idle_slope && !cbs->send_slope)
			continue;

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs,
						  true);
		if (rc)
			break;
	}

	return rc;
}

/* Human-readable causes for a full static config reload, used in the
 * dev_info() log message of sja1105_static_config_reload().
 */
static const char * const sja1105_reset_reasons[] = {
	[SJA1105_VLAN_FILTERING] = "VLAN filtering",
	[SJA1105_RX_HWTSTAMPING] = "RX timestamping",
	[SJA1105_AGEING_TIME] = "Ageing time",
	[SJA1105_SCHEDULING] = "Time-aware scheduling",
	[SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
	[SJA1105_VIRTUAL_LINKS] = "Virtual links",
};

/* For situations
where we need to change a setting at runtime that is only
 * available through the static configuration, resetting the switch in order
 * to upload the new static config is unavoidable. Back up the settings we
 * modify at runtime (currently only MAC) and restore them after uploading,
 * such that this operation is relatively seamless.
 */
int sja1105_static_config_reload(struct sja1105_private *priv,
				 enum sja1105_reset_reason reason)
{
	struct ptp_system_timestamp ptp_sts_before;
	struct ptp_system_timestamp ptp_sts_after;
	int speed_mbps[SJA1105_MAX_NUM_PORTS];
	struct sja1105_mac_config_entry *mac;
	struct dsa_switch *ds = priv->ds;
	s64 t1, t2, t3, t4;
	s64 t12, t34;
	u16 bmcr = 0;
	int rc, i;
	s64 now;

	mutex_lock(&priv->mgmt_lock);

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* Back up the dynamic link speed changed by sja1105_adjust_port_config
	 * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
	 * switch wants to see in the static config in order to allow us to
	 * change it through the dynamic interface later.
	 */
	for (i = 0; i < ds->num_ports; i++) {
		speed_mbps[i] = sja1105_speed[mac[i].speed];
		mac[i].speed = SJA1105_SPEED_AUTO;
	}

	/* Also preserve the SGMII PCS state (AN enable / forced speed) */
	if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT))
		bmcr = sja1105_sgmii_read(priv, MII_BMCR);

	/* No PTP operations can run right now */
	mutex_lock(&priv->ptp_data.lock);

	rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
	if (rc < 0)
		goto out_unlock_ptp;

	/* Reset switch and send updated static configuration */
	rc = sja1105_static_config_upload(priv);
	if (rc < 0)
		goto out_unlock_ptp;

	rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
	if (rc < 0)
		goto out_unlock_ptp;

	t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
	t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
	t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
	t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
	/* Mid point, corresponds to pre-reset PTPCLKVAL */
	t12 = t1 + (t2 - t1) / 2;
	/* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
	t34 = t3 + (t4 - t3) / 2;
	/* Advance PTPCLKVAL by the time it took since its readout */
	now += (t34 - t12);

	__sja1105_ptp_adjtime(ds, now);

out_unlock_ptp:
	mutex_unlock(&priv->ptp_data.lock);

	dev_info(priv->ds->dev,
		 "Reset switch and programmed static config. Reason: %s\n",
		 sja1105_reset_reasons[reason]);

	/* Configure the CGU (PLLs) for MII and RMII PHYs.
	 * For these interfaces there is no dynamic configuration
	 * needed, since PLLs have same settings at all speeds.
	 */
	rc = priv->info->clocking_setup(priv);
	if (rc < 0)
		goto out;

	/* Restore the per-port speeds saved above */
	for (i = 0; i < ds->num_ports; i++) {
		rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
		if (rc < 0)
			goto out;
	}

	/* Restore the SGMII PCS from the BMCR snapshot taken above */
	if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) {
		bool an_enabled = !!(bmcr & BMCR_ANENABLE);

		sja1105_sgmii_pcs_config(priv, an_enabled, false);

		if (!an_enabled) {
			int speed = SPEED_UNKNOWN;

			if (bmcr & BMCR_SPEED1000)
				speed = SPEED_1000;
			else if (bmcr & BMCR_SPEED100)
				speed = SPEED_100;
			else
				speed = SPEED_10;

			sja1105_sgmii_pcs_force_speed(priv, speed);
		}
	}

	/* Re-program the credit-based shapers lost in the reset */
	rc = sja1105_reload_cbs(priv);
	if (rc < 0)
		goto out;
out:
	mutex_unlock(&priv->mgmt_lock);

	return rc;
}

static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
{
	struct sja1105_mac_config_entry *mac;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	mac[port].vlanid = pvid;

	/* Commit the new pvid to hardware immediately */
	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					    &mac[port], true);
}

static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
					 int tree_index, int sw_index,
					 int other_port, struct net_device *br)
{
	struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
	struct sja1105_private *other_priv = other_ds->priv;
	struct sja1105_private *priv = ds->priv;
	int port, rc;

	/* Only pair up with other sja1105 switches */
	if (other_ds->ops != &sja1105_switch_ops)
		return 0;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_user_port(ds, port))
			continue;
		if (dsa_to_port(ds, port)->bridge_dev != br)
			continue;

		/* Set up the dsa_8021q cross-chip links in both directions */
		rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx,
						     port,
						     other_priv->dsa_8021q_ctx,
						     other_port);
		if (rc)
			return rc;

		rc =
dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx,
						     other_port,
						     priv->dsa_8021q_ctx,
						     port);
		if (rc)
			return rc;
	}

	return 0;
}

static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
					   int tree_index, int sw_index,
					   int other_port,
					   struct net_device *br)
{
	struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
	struct sja1105_private *other_priv = other_ds->priv;
	struct sja1105_private *priv = ds->priv;
	int port;

	/* Only paired with other sja1105 switches in the first place */
	if (other_ds->ops != &sja1105_switch_ops)
		return;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_user_port(ds, port))
			continue;
		if (dsa_to_port(ds, port)->bridge_dev != br)
			continue;

		/* Tear down the cross-chip links in both directions */
		dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port,
						 other_priv->dsa_8021q_ctx,
						 other_port);

		dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx,
						 other_port,
						 priv->dsa_8021q_ctx, port);
	}
}

static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
{
	struct sja1105_private *priv = ds->priv;
	int rc;

	rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled);
	if (rc)
		return rc;

	dev_info(ds->dev, "%s switch tagging\n",
		 enabled ?
"Enabled" : "Disabled"); 2065 return 0; 2066 } 2067 2068 static enum dsa_tag_protocol 2069 sja1105_get_tag_protocol(struct dsa_switch *ds, int port, 2070 enum dsa_tag_protocol mp) 2071 { 2072 return DSA_TAG_PROTO_SJA1105; 2073 } 2074 2075 static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid) 2076 { 2077 int subvlan; 2078 2079 if (pvid) 2080 return 0; 2081 2082 for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) 2083 if (subvlan_map[subvlan] == VLAN_N_VID) 2084 return subvlan; 2085 2086 return -1; 2087 } 2088 2089 static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid) 2090 { 2091 int subvlan; 2092 2093 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) 2094 if (subvlan_map[subvlan] == vid) 2095 return subvlan; 2096 2097 return -1; 2098 } 2099 2100 static int sja1105_find_committed_subvlan(struct sja1105_private *priv, 2101 int port, u16 vid) 2102 { 2103 struct sja1105_port *sp = &priv->ports[port]; 2104 2105 return sja1105_find_subvlan(sp->subvlan_map, vid); 2106 } 2107 2108 static void sja1105_init_subvlan_map(u16 *subvlan_map) 2109 { 2110 int subvlan; 2111 2112 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) 2113 subvlan_map[subvlan] = VLAN_N_VID; 2114 } 2115 2116 static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port, 2117 u16 *subvlan_map) 2118 { 2119 struct sja1105_port *sp = &priv->ports[port]; 2120 int subvlan; 2121 2122 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) 2123 sp->subvlan_map[subvlan] = subvlan_map[subvlan]; 2124 } 2125 2126 static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid) 2127 { 2128 struct sja1105_vlan_lookup_entry *vlan; 2129 int count, i; 2130 2131 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; 2132 count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count; 2133 2134 for (i = 0; i < count; i++) 2135 if (vlan[i].vlanid == vid) 2136 return i; 2137 2138 /* Return an invalid entry index if not found */ 2139 return 
-1;
}

/* Search @retagging (of @count valid entries) for a rule retagging
 * @from_vid received on @from_port into @to_vid. Returns the index,
 * or -1 if no such rule exists.
 */
static int
sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging,
			     int count, int from_port, u16 from_vid,
			     u16 to_vid)
{
	int i;

	for (i = 0; i < count; i++)
		if (retagging[i].ing_port == BIT(from_port) &&
		    retagging[i].vlan_ing == from_vid &&
		    retagging[i].vlan_egr == to_vid)
			return i;

	/* Return an invalid entry index if not found */
	return -1;
}

/* Reconcile the hardware VLAN Lookup and Retagging tables with the desired
 * state in @new_vlan / @new_retagging: delete stale entries, write changed
 * ones via dynamic reconfiguration, then rebuild the static config shadow
 * arrays to match. @new_vlan is indexed by VID; unused slots carry the
 * VLAN_N_VID sentinel in their vlanid field.
 */
static int sja1105_commit_vlans(struct sja1105_private *priv,
				struct sja1105_vlan_lookup_entry *new_vlan,
				struct sja1105_retagging_entry *new_retagging,
				int num_retagging)
{
	struct sja1105_retagging_entry *retagging;
	struct sja1105_vlan_lookup_entry *vlan;
	struct sja1105_table *table;
	int num_vlans = 0;
	int rc, i, k = 0;

	/* VLAN table */
	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
	vlan = table->entries;

	for (i = 0; i < VLAN_N_VID; i++) {
		int match = sja1105_is_vlan_configured(priv, i);

		if (new_vlan[i].vlanid != VLAN_N_VID)
			num_vlans++;

		if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) {
			/* Was there before, no longer is. Delete */
			dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i);
			rc = sja1105_dynamic_config_write(priv,
							  BLK_IDX_VLAN_LOOKUP,
							  i, &vlan[match], false);
			if (rc < 0)
				return rc;
		} else if (new_vlan[i].vlanid != VLAN_N_VID) {
			/* Nothing changed, don't do anything */
			if (match >= 0 &&
			    vlan[match].vlanid == new_vlan[i].vlanid &&
			    vlan[match].tag_port == new_vlan[i].tag_port &&
			    vlan[match].vlan_bc == new_vlan[i].vlan_bc &&
			    vlan[match].vmemb_port == new_vlan[i].vmemb_port)
				continue;
			/* Update entry */
			dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i);
			rc = sja1105_dynamic_config_write(priv,
							  BLK_IDX_VLAN_LOOKUP,
							  i, &new_vlan[i],
							  true);
			if (rc < 0)
				return rc;
		}
	}

	/* Rebuild the static config shadow from scratch.
	 * NOTE(review): if the kcalloc below fails, entry_count still
	 * describes the freed array — verify callers treat -ENOMEM as fatal.
	 */
	if (table->entry_count)
		kfree(table->entries);

	table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = num_vlans;
	vlan = table->entries;

	for (i = 0; i < VLAN_N_VID; i++) {
		if (new_vlan[i].vlanid == VLAN_N_VID)
			continue;
		vlan[k++] = new_vlan[i];
	}

	/* VLAN Retagging Table */
	table = &priv->static_config.tables[BLK_IDX_RETAGGING];
	retagging = table->entries;

	/* Invalidate all existing retagging rules in hardware first */
	for (i = 0; i < table->entry_count; i++) {
		rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
						  i, &retagging[i], false);
		if (rc)
			return rc;
	}

	if (table->entry_count)
		kfree(table->entries);

	table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = num_retagging;
	retagging = table->entries;

	for (i = 0; i < num_retagging; i++) {
		retagging[i] = new_retagging[i];

		/* Update entry */
		rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
						  i, &retagging[i], true);
		if (rc < 0)
			return rc;
	}

	return 0;
}

/* A VLAN seen on a port of a neighbouring switch that also exists on one of
 * our ports, and therefore needs reverse retagging here.
 */
struct sja1105_crosschip_vlan {
	struct list_head list;
	u16 vid;		/* bridge VLAN ID */
	bool untagged;		/* egress untagged on the member port */
	int port;		/* our port that is a member of @vid */
	int other_port;		/* remote port that is a member of @vid */
	struct dsa_8021q_context *other_ctx;	/* remote switch's tag_8021q context */
};

/* A distinct neighbouring switch reachable via crosschip links */
struct sja1105_crosschip_switch {
	struct list_head list;
	struct dsa_8021q_context *other_ctx;
};

/* Program the pvid of every port into hardware. The source of truth depends
 * on the VLAN awareness state: bridge VLANs under full filtering, dsa_8021q
 * VLANs otherwise.
 */
static int sja1105_commit_pvid(struct sja1105_private *priv)
{
	struct sja1105_bridge_vlan *v;
	struct list_head *vlan_list;
	int rc = 0;

	if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
		vlan_list = &priv->bridge_vlans;
	else
		vlan_list = &priv->dsa_8021q_vlans;

	list_for_each_entry(v, vlan_list, list) {
		if (v->pvid) {
			rc = sja1105_pvid_apply(priv, v->port, v->vid);
			if (rc)
				break;
		}
	}

	return rc;
}

/* Fold the bridge VLAN list into @new_vlan (indexed by VID). Skipped
 * entirely while the switch is VLAN-unaware.
 */
static int
sja1105_build_bridge_vlans(struct sja1105_private *priv,
			   struct sja1105_vlan_lookup_entry *new_vlan)
{
	struct sja1105_bridge_vlan *v;

	if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
		return 0;

	list_for_each_entry(v, &priv->bridge_vlans, list) {
		int match = v->vid;

		new_vlan[match].vlanid = v->vid;
		new_vlan[match].vmemb_port |= BIT(v->port);
		new_vlan[match].vlan_bc |= BIT(v->port);
		if (!v->untagged)
			new_vlan[match].tag_port |= BIT(v->port);
	}

	return 0;
}

/* Fold the dsa_8021q (tag_8021q RX/TX/crosschip) VLAN list into @new_vlan.
 * Skipped under full VLAN filtering, where tag_8021q is not used.
 */
static int
sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
			      struct sja1105_vlan_lookup_entry *new_vlan)
{
	struct sja1105_bridge_vlan *v;

	if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
		return 0;

	list_for_each_entry(v, &priv->dsa_8021q_vlans, list) {
		int match = v->vid;

		new_vlan[match].vlanid = v->vid;
		new_vlan[match].vmemb_port |= BIT(v->port);
		new_vlan[match].vlan_bc |= BIT(v->port);
		if (!v->untagged)
new_vlan[match].tag_port |= BIT(v->port); 2334 } 2335 2336 return 0; 2337 } 2338 2339 static int sja1105_build_subvlans(struct sja1105_private *priv, 2340 u16 subvlan_map[][DSA_8021Q_N_SUBVLAN], 2341 struct sja1105_vlan_lookup_entry *new_vlan, 2342 struct sja1105_retagging_entry *new_retagging, 2343 int *num_retagging) 2344 { 2345 struct sja1105_bridge_vlan *v; 2346 int k = *num_retagging; 2347 2348 if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT) 2349 return 0; 2350 2351 list_for_each_entry(v, &priv->bridge_vlans, list) { 2352 int upstream = dsa_upstream_port(priv->ds, v->port); 2353 int match, subvlan; 2354 u16 rx_vid; 2355 2356 /* Only sub-VLANs on user ports need to be applied. 2357 * Bridge VLANs also include VLANs added automatically 2358 * by DSA on the CPU port. 2359 */ 2360 if (!dsa_is_user_port(priv->ds, v->port)) 2361 continue; 2362 2363 subvlan = sja1105_find_subvlan(subvlan_map[v->port], 2364 v->vid); 2365 if (subvlan < 0) { 2366 subvlan = sja1105_find_free_subvlan(subvlan_map[v->port], 2367 v->pvid); 2368 if (subvlan < 0) { 2369 dev_err(priv->ds->dev, "No more free subvlans\n"); 2370 return -ENOSPC; 2371 } 2372 } 2373 2374 rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan); 2375 2376 /* @v->vid on @v->port needs to be retagged to @rx_vid 2377 * on @upstream. Assume @v->vid on @v->port and on 2378 * @upstream was already configured by the previous 2379 * iteration over bridge_vlans. 
2380 */ 2381 match = rx_vid; 2382 new_vlan[match].vlanid = rx_vid; 2383 new_vlan[match].vmemb_port |= BIT(v->port); 2384 new_vlan[match].vmemb_port |= BIT(upstream); 2385 new_vlan[match].vlan_bc |= BIT(v->port); 2386 new_vlan[match].vlan_bc |= BIT(upstream); 2387 /* The "untagged" flag is set the same as for the 2388 * original VLAN 2389 */ 2390 if (!v->untagged) 2391 new_vlan[match].tag_port |= BIT(v->port); 2392 /* But it's always tagged towards the CPU */ 2393 new_vlan[match].tag_port |= BIT(upstream); 2394 2395 /* The Retagging Table generates packet *clones* with 2396 * the new VLAN. This is a very odd hardware quirk 2397 * which we need to suppress by dropping the original 2398 * packet. 2399 * Deny egress of the original VLAN towards the CPU 2400 * port. This will force the switch to drop it, and 2401 * we'll see only the retagged packets. 2402 */ 2403 match = v->vid; 2404 new_vlan[match].vlan_bc &= ~BIT(upstream); 2405 2406 /* And the retagging itself */ 2407 new_retagging[k].vlan_ing = v->vid; 2408 new_retagging[k].vlan_egr = rx_vid; 2409 new_retagging[k].ing_port = BIT(v->port); 2410 new_retagging[k].egr_port = BIT(upstream); 2411 if (k++ == SJA1105_MAX_RETAGGING_COUNT) { 2412 dev_err(priv->ds->dev, "No more retagging rules\n"); 2413 return -ENOSPC; 2414 } 2415 2416 subvlan_map[v->port][subvlan] = v->vid; 2417 } 2418 2419 *num_retagging = k; 2420 2421 return 0; 2422 } 2423 2424 /* Sadly, in crosschip scenarios where the CPU port is also the link to another 2425 * switch, we should retag backwards (the dsa_8021q vid to the original vid) on 2426 * the CPU port of neighbour switches. 
 */
static int
sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
				 struct sja1105_vlan_lookup_entry *new_vlan,
				 struct sja1105_retagging_entry *new_retagging,
				 int *num_retagging)
{
	struct sja1105_crosschip_vlan *tmp, *pos;
	struct dsa_8021q_crosschip_link *c;
	struct sja1105_bridge_vlan *v, *w;
	struct list_head crosschip_vlans;
	int k = *num_retagging;
	int rc = 0;

	if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
		return 0;

	INIT_LIST_HEAD(&crosschip_vlans);

	/* Pass 1: collect, deduplicated, the VLANs that exist both on a
	 * remote port we have a crosschip link to and on our local port.
	 */
	list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
		struct sja1105_private *other_priv = c->other_ctx->ds->priv;

		if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
			continue;

		/* Crosschip links are also added to the CPU ports.
		 * Ignore those.
		 */
		if (!dsa_is_user_port(priv->ds, c->port))
			continue;
		if (!dsa_is_user_port(c->other_ctx->ds, c->other_port))
			continue;

		/* Search for VLANs on the remote port */
		list_for_each_entry(v, &other_priv->bridge_vlans, list) {
			bool already_added = false;
			bool we_have_it = false;

			if (v->port != c->other_port)
				continue;

			/* If @v is a pvid on @other_ds, it does not need
			 * re-retagging, because its SVL field is 0 and we
			 * already allow that, via the dsa_8021q crosschip
			 * links.
			 */
			if (v->pvid)
				continue;

			/* Search for the VLAN on our local port */
			list_for_each_entry(w, &priv->bridge_vlans, list) {
				if (w->port == c->port && w->vid == v->vid) {
					we_have_it = true;
					break;
				}
			}

			if (!we_have_it)
				continue;

			list_for_each_entry(tmp, &crosschip_vlans, list) {
				if (tmp->vid == v->vid &&
				    tmp->untagged == v->untagged &&
				    tmp->port == c->port &&
				    tmp->other_port == v->port &&
				    tmp->other_ctx == c->other_ctx) {
					already_added = true;
					break;
				}
			}

			if (already_added)
				continue;

			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
			if (!tmp) {
				dev_err(priv->ds->dev, "Failed to allocate memory\n");
				rc = -ENOMEM;
				goto out;
			}
			tmp->vid = v->vid;
			tmp->port = c->port;
			tmp->other_port = v->port;
			tmp->other_ctx = c->other_ctx;
			tmp->untagged = v->untagged;
			list_add(&tmp->list, &crosschip_vlans);
		}
	}

	/* Pass 2: for each collected VLAN, install the remote sub-VLAN RX VID
	 * locally and a reverse retagging rule back to the original VID.
	 */
	list_for_each_entry(tmp, &crosschip_vlans, list) {
		struct sja1105_private *other_priv = tmp->other_ctx->ds->priv;
		int upstream = dsa_upstream_port(priv->ds, tmp->port);
		int match, subvlan;
		u16 rx_vid;

		subvlan = sja1105_find_committed_subvlan(other_priv,
							 tmp->other_port,
							 tmp->vid);
		/* If this happens, it's a bug. The neighbour switch does not
		 * have a subvlan for tmp->vid on tmp->other_port, but it
		 * should, since we already checked for its vlan_state.
		 */
		if (WARN_ON(subvlan < 0)) {
			rc = -EINVAL;
			goto out;
		}

		rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds,
						  tmp->other_port,
						  subvlan);

		/* The @rx_vid retagged from @tmp->vid on
		 * {@tmp->other_ds, @tmp->other_port} needs to be
		 * re-retagged to @tmp->vid on the way back to us.
		 *
		 * Assume the original @tmp->vid is already configured
		 * on this local switch, otherwise we wouldn't be
		 * retagging its subvlan on the other switch in the
		 * first place. We just need to add a reverse retagging
		 * rule for @rx_vid and install @rx_vid on our ports.
		 */
		match = rx_vid;
		new_vlan[match].vlanid = rx_vid;
		new_vlan[match].vmemb_port |= BIT(tmp->port);
		new_vlan[match].vmemb_port |= BIT(upstream);
		/* The "untagged" flag is set the same as for the
		 * original VLAN. And towards the CPU, it doesn't
		 * really matter, because @rx_vid will only receive
		 * traffic on that port. For consistency with other dsa_8021q
		 * VLANs, we'll keep the CPU port tagged.
		 */
		if (!tmp->untagged)
			new_vlan[match].tag_port |= BIT(tmp->port);
		new_vlan[match].tag_port |= BIT(upstream);
		/* Deny egress of @rx_vid towards our front-panel port.
		 * This will force the switch to drop it, and we'll see
		 * only the re-retagged packets (having the original,
		 * pre-initial-retagging, VLAN @tmp->vid).
		 */
		new_vlan[match].vlan_bc &= ~BIT(tmp->port);

		/* On reverse retagging, the same ingress VLAN goes to multiple
		 * ports. So we have an opportunity to create composite rules
		 * to not waste the limited space in the retagging table.
		 */
		k = sja1105_find_retagging_entry(new_retagging, *num_retagging,
						 upstream, rx_vid, tmp->vid);
		if (k < 0) {
			if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) {
				dev_err(priv->ds->dev, "No more retagging rules\n");
				rc = -ENOSPC;
				goto out;
			}
			k = (*num_retagging)++;
		}
		/* And the retagging itself */
		new_retagging[k].vlan_ing = rx_vid;
		new_retagging[k].vlan_egr = tmp->vid;
		new_retagging[k].ing_port = BIT(upstream);
		new_retagging[k].egr_port |= BIT(tmp->port);
	}

out:
	/* Free the temporary list regardless of success or failure */
	list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) {
		list_del(&tmp->list);
		kfree(tmp);
	}

	return rc;
}

static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify);

/* Trigger a VLAN table rebuild (without further notification, to avoid
 * recursion) on every distinct neighbour switch we have crosschip links to.
 */
static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
{
	struct sja1105_crosschip_switch *s, *pos;
	struct list_head crosschip_switches;
	struct dsa_8021q_crosschip_link *c;
	int rc = 0;

	INIT_LIST_HEAD(&crosschip_switches);

	/* Deduplicate the switches behind our crosschip links */
	list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
		bool already_added = false;

		list_for_each_entry(s, &crosschip_switches, list) {
			if (s->other_ctx == c->other_ctx) {
				already_added = true;
				break;
			}
		}

		if (already_added)
			continue;

		s = kzalloc(sizeof(*s), GFP_KERNEL);
		if (!s) {
			dev_err(priv->ds->dev, "Failed to allocate memory\n");
			rc = -ENOMEM;
			goto out;
		}
		s->other_ctx = c->other_ctx;
		list_add(&s->list, &crosschip_switches);
	}

	list_for_each_entry(s, &crosschip_switches, list) {
		struct sja1105_private *other_priv = s->other_ctx->ds->priv;

		rc = sja1105_build_vlan_table(other_priv, false);
		if (rc)
			goto out;
	}

out:
	list_for_each_entry_safe(s, pos, &crosschip_switches, list) {
		list_del(&s->list);
		kfree(s);
	}
2645 2646 return rc; 2647 } 2648 2649 static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify) 2650 { 2651 u16 subvlan_map[SJA1105_MAX_NUM_PORTS][DSA_8021Q_N_SUBVLAN]; 2652 struct sja1105_retagging_entry *new_retagging; 2653 struct sja1105_vlan_lookup_entry *new_vlan; 2654 struct sja1105_table *table; 2655 int i, num_retagging = 0; 2656 int rc; 2657 2658 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; 2659 new_vlan = kcalloc(VLAN_N_VID, 2660 table->ops->unpacked_entry_size, GFP_KERNEL); 2661 if (!new_vlan) 2662 return -ENOMEM; 2663 2664 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; 2665 new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT, 2666 table->ops->unpacked_entry_size, GFP_KERNEL); 2667 if (!new_retagging) { 2668 kfree(new_vlan); 2669 return -ENOMEM; 2670 } 2671 2672 for (i = 0; i < VLAN_N_VID; i++) 2673 new_vlan[i].vlanid = VLAN_N_VID; 2674 2675 for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++) 2676 new_retagging[i].vlan_ing = VLAN_N_VID; 2677 2678 for (i = 0; i < priv->ds->num_ports; i++) 2679 sja1105_init_subvlan_map(subvlan_map[i]); 2680 2681 /* Bridge VLANs */ 2682 rc = sja1105_build_bridge_vlans(priv, new_vlan); 2683 if (rc) 2684 goto out; 2685 2686 /* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c: 2687 * - RX VLANs 2688 * - TX VLANs 2689 * - Crosschip links 2690 */ 2691 rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan); 2692 if (rc) 2693 goto out; 2694 2695 /* Private VLANs necessary for dsa_8021q operation, which we need to 2696 * determine on our own: 2697 * - Sub-VLANs 2698 * - Sub-VLANs of crosschip switches 2699 */ 2700 rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging, 2701 &num_retagging); 2702 if (rc) 2703 goto out; 2704 2705 rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging, 2706 &num_retagging); 2707 if (rc) 2708 goto out; 2709 2710 rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging); 2711 if (rc) 2712 goto out; 2713 
2714 rc = sja1105_commit_pvid(priv); 2715 if (rc) 2716 goto out; 2717 2718 for (i = 0; i < priv->ds->num_ports; i++) 2719 sja1105_commit_subvlan_map(priv, i, subvlan_map[i]); 2720 2721 if (notify) { 2722 rc = sja1105_notify_crosschip_switches(priv); 2723 if (rc) 2724 goto out; 2725 } 2726 2727 out: 2728 kfree(new_vlan); 2729 kfree(new_retagging); 2730 2731 return rc; 2732 } 2733 2734 /* The TPID setting belongs to the General Parameters table, 2735 * which can only be partially reconfigured at runtime (and not the TPID). 2736 * So a switch reset is required. 2737 */ 2738 int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, 2739 struct netlink_ext_ack *extack) 2740 { 2741 struct sja1105_l2_lookup_params_entry *l2_lookup_params; 2742 struct sja1105_general_params_entry *general_params; 2743 struct sja1105_private *priv = ds->priv; 2744 enum sja1105_vlan_state state; 2745 struct sja1105_table *table; 2746 struct sja1105_rule *rule; 2747 bool want_tagging; 2748 u16 tpid, tpid2; 2749 int rc; 2750 2751 list_for_each_entry(rule, &priv->flow_block.rules, list) { 2752 if (rule->type == SJA1105_RULE_VL) { 2753 NL_SET_ERR_MSG_MOD(extack, 2754 "Cannot change VLAN filtering with active VL rules"); 2755 return -EBUSY; 2756 } 2757 } 2758 2759 if (enabled) { 2760 /* Enable VLAN filtering. */ 2761 tpid = ETH_P_8021Q; 2762 tpid2 = ETH_P_8021AD; 2763 } else { 2764 /* Disable VLAN filtering. 
 */
		tpid = ETH_P_SJA1105;
		tpid2 = ETH_P_SJA1105;
	}

	/* Choose the TPID the tagger will transmit with on each port */
	for (port = 0; port < ds->num_ports; port++) {
		struct sja1105_port *sp = &priv->ports[port];

		if (enabled)
			sp->xmit_tpid = priv->info->qinq_tpid;
		else
			sp->xmit_tpid = ETH_P_SJA1105;
	}

	if (!enabled)
		state = SJA1105_VLAN_UNAWARE;
	else if (priv->best_effort_vlan_filtering)
		state = SJA1105_VLAN_BEST_EFFORT;
	else
		state = SJA1105_VLAN_FILTERING_FULL;

	if (priv->vlan_state == state)
		return 0;

	priv->vlan_state = state;
	want_tagging = (state == SJA1105_VLAN_UNAWARE ||
			state == SJA1105_VLAN_BEST_EFFORT);

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
	general_params = table->entries;
	/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
	general_params->tpid = tpid;
	/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
	general_params->tpid2 = tpid2;
	/* When VLAN filtering is on, we need to at least be able to
	 * decode management traffic through the "backup plan".
	 */
	general_params->incl_srcpt1 = enabled;
	general_params->incl_srcpt0 = enabled;

	/* Recomputed here (overriding the value above) to include the
	 * best-effort case as well.
	 */
	want_tagging = priv->best_effort_vlan_filtering || !enabled;

	/* VLAN filtering => independent VLAN learning.
	 * No VLAN filtering (or best effort) => shared VLAN learning.
	 *
	 * In shared VLAN learning mode, untagged traffic still gets
	 * pvid-tagged, and the FDB table gets populated with entries
	 * containing the "real" (pvid or from VLAN tag) VLAN ID.
	 * However the switch performs a masked L2 lookup in the FDB,
	 * effectively only looking up a frame's DMAC (and not VID) for the
	 * forwarding decision.
	 *
	 * This is extremely convenient for us, because in modes with
	 * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
	 * each front panel port. This is good for identification but breaks
	 * learning badly - the VID of the learnt FDB entry is unique, aka
	 * no frames coming from any other port are going to have it. So
	 * for forwarding purposes, this is as though learning was broken
	 * (all frames get flooded).
	 */
	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
	l2_lookup_params = table->entries;
	l2_lookup_params->shared_learn = want_tagging;

	sja1105_frame_memory_partitioning(priv);

	rc = sja1105_build_vlan_table(priv, false);
	if (rc)
		return rc;

	/* The TPID change requires a full static config reload (switch reset) */
	rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
	if (rc)
		NL_SET_ERR_MSG_MOD(extack, "Failed to change VLAN Ethertype");

	/* Switch port identification based on 802.1Q is only passable
	 * if we are not under a vlan_filtering bridge. So make sure
	 * the two configurations are mutually exclusive (of course, the
	 * user may know better, i.e. best_effort_vlan_filtering).
	 */
	return sja1105_setup_8021q_tagging(ds, want_tagging);
}

/* Returns number of VLANs added (0 or 1) on success,
 * or a negative error code.
 */
static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
				u16 flags, struct list_head *vlan_list)
{
	bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
	struct sja1105_bridge_vlan *v;

	list_for_each_entry(v, vlan_list, list) {
		if (v->port == port && v->vid == vid) {
			/* Already added */
			if (v->untagged == untagged && v->pvid == pvid)
				/* Nothing changed */
				return 0;

			/* It's the same VLAN, but some of the flags changed
			 * and the user did not bother to delete it first.
			 * Update it and trigger sja1105_build_vlan_table.
 */
			v->untagged = untagged;
			v->pvid = pvid;
			return 1;
		}
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		dev_err(ds->dev, "Out of memory while storing VLAN\n");
		return -ENOMEM;
	}

	v->port = port;
	v->vid = vid;
	v->untagged = untagged;
	v->pvid = pvid;
	list_add(&v->list, vlan_list);

	return 1;
}

/* Returns number of VLANs deleted (0 or 1) */
static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
				struct list_head *vlan_list)
{
	struct sja1105_bridge_vlan *v, *n;

	list_for_each_entry_safe(v, n, vlan_list, list) {
		if (v->port == port && v->vid == vid) {
			list_del(&v->list);
			kfree(v);
			return 1;
		}
	}

	return 0;
}

/* DSA .port_vlan_add callback: record the bridge VLAN and rebuild the
 * hardware tables if anything actually changed.
 */
static int sja1105_vlan_add(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_vlan *vlan,
			    struct netlink_ext_ack *extack)
{
	struct sja1105_private *priv = ds->priv;
	bool vlan_table_changed = false;
	int rc;

	/* If the user wants best-effort VLAN filtering (aka vlan_filtering
	 * bridge plus tagging), be sure to at least deny alterations to the
	 * configuration done by dsa_8021q.
	 */
	if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL &&
	    vid_is_dsa_8021q(vlan->vid)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Range 1024-3071 reserved for dsa_8021q operation");
		return -EBUSY;
	}

	rc = sja1105_vlan_add_one(ds, port, vlan->vid, vlan->flags,
				  &priv->bridge_vlans);
	if (rc < 0)
		return rc;
	if (rc > 0)
		vlan_table_changed = true;

	if (!vlan_table_changed)
		return 0;

	return sja1105_build_vlan_table(priv, true);
}

/* DSA .port_vlan_del callback: forget the bridge VLAN and rebuild the
 * hardware tables if it was actually present.
 */
static int sja1105_vlan_del(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_vlan *vlan)
{
	struct sja1105_private *priv = ds->priv;
	bool vlan_table_changed = false;
	int rc;

	rc = sja1105_vlan_del_one(ds, port, vlan->vid, &priv->bridge_vlans);
	if (rc > 0)
		vlan_table_changed = true;

	if (!vlan_table_changed)
		return 0;

	return sja1105_build_vlan_table(priv, true);
}

/* tag_8021q callback: install a dsa_8021q VLAN (RX/TX/crosschip VID) */
static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
				      u16 flags)
{
	struct sja1105_private *priv = ds->priv;
	int rc;

	rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans);
	if (rc <= 0)
		return rc;

	return sja1105_build_vlan_table(priv, true);
}

/* tag_8021q callback: remove a dsa_8021q VLAN */
static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
	struct sja1105_private *priv = ds->priv;
	int rc;

	rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans);
	if (!rc)
		return 0;

	return sja1105_build_vlan_table(priv, true);
}

static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = {
	.vlan_add	= sja1105_dsa_8021q_vlan_add,
	.vlan_del	= sja1105_dsa_8021q_vlan_del,
};

/* The programming model for the SJA1105 switch is "all-at-once" via static
 * configuration tables.
Some of these can be dynamically modified at runtime,
 * but not the xMII mode parameters table.
 * Furthermore, some PHYs may not have crystals for generating their clocks
 * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
 * ref_clk pin. So port clocking needs to be initialized early, before
 * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
 * Setting correct PHY link speed does not matter now.
 * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
 * bindings are not yet parsed by DSA core. We need to parse early so that we
 * can populate the xMII mode parameters table.
 */
static int sja1105_setup(struct dsa_switch *ds)
{
	struct sja1105_dt_port ports[SJA1105_MAX_NUM_PORTS];
	struct sja1105_private *priv = ds->priv;
	int rc;

	rc = sja1105_parse_dt(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
		return rc;
	}

	/* Error out early if internal delays are required through DT
	 * and we can't apply them.
	 */
	rc = sja1105_parse_rgmii_delays(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "RGMII delay not supported\n");
		return rc;
	}

	rc = sja1105_ptp_clock_register(ds);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
		return rc;
	}
	/* Create and send configuration down to device */
	rc = sja1105_static_config_load(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to load static config: %d\n", rc);
		goto out_ptp_clock_unregister;
	}
	/* Configure the CGU (PHY link modes and speeds) */
	rc = priv->info->clocking_setup(priv);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
		goto out_static_config_free;
	}
	/* On SJA1105, VLAN filtering per se is always enabled in hardware.
	 * The only thing we can do to disable it is lie about what the 802.1Q
	 * EtherType is.
	 * So it will still try to apply VLAN filtering, but all ingress
	 * traffic (except frames received with EtherType of ETH_P_SJA1105)
	 * will be internally tagged with a distorted VLAN header where the
	 * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
	 */
	ds->vlan_filtering_is_global = true;

	/* Advertise the 8 egress queues */
	ds->num_tx_queues = SJA1105_NUM_TC;

	ds->mtu_enforcement_ingress = true;

	priv->best_effort_vlan_filtering = true;

	rc = sja1105_devlink_setup(ds);
	if (rc < 0)
		goto out_static_config_free;

	/* The DSA/switchdev model brings up switch ports in standalone mode by
	 * default, and that means vlan_filtering is 0 since they're not under
	 * a bridge, so it's safe to set up switch tagging at this time.
	 */
	rtnl_lock();
	rc = sja1105_setup_8021q_tagging(ds, true);
	rtnl_unlock();
	if (rc)
		goto out_devlink_teardown;

	return 0;

/* Error labels deliberately fall through, undoing setup in reverse order */
out_devlink_teardown:
	sja1105_devlink_teardown(ds);
out_ptp_clock_unregister:
	sja1105_ptp_clock_unregister(ds);
out_static_config_free:
	sja1105_static_config_free(&priv->static_config);

	return rc;
}

/* Undo sja1105_setup: stop per-port xmit workers, tear down subsystems and
 * free the static config shadow plus the VLAN bookkeeping lists.
 */
static void sja1105_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_bridge_vlan *v, *n;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		struct sja1105_port *sp = &priv->ports[port];

		if (!dsa_is_user_port(ds, port))
			continue;

		if (sp->xmit_worker)
			kthread_destroy_worker(sp->xmit_worker);
	}

	sja1105_devlink_teardown(ds);
	sja1105_flower_teardown(ds);
	sja1105_tas_teardown(ds);
	sja1105_ptp_clock_unregister(ds);
	sja1105_static_config_free(&priv->static_config);

	list_for_each_entry_safe(v, n,
				 &priv->dsa_8021q_vlans, list) {
		list_del(&v->list);
		kfree(v);
	}

	list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) {
		list_del(&v->list);
		kfree(v);
	}
}

/* DSA .port_disable callback: stop deferred transmission on a user port */
static void sja1105_port_disable(struct dsa_switch *ds, int port)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_port *sp = &priv->ports[port];

	if (!dsa_is_user_port(ds, port))
		return;

	kthread_cancel_work_sync(&sp->xmit_work);
	skb_queue_purge(&sp->xmit_queue);
}

/* Send a management (link-local) frame: install a one-shot management route
 * matching the frame's DMAC into @slot, enqueue the frame on the host port,
 * then poll until the hardware consumes the route (ENFPORT cleared) or we
 * time out. Consumes @skb. Always returns NETDEV_TX_OK.
 */
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
			     struct sk_buff *skb, bool takets)
{
	struct sja1105_mgmt_entry mgmt_route = {0};
	struct sja1105_private *priv = ds->priv;
	struct ethhdr *hdr;
	int timeout = 10;
	int rc;

	hdr = eth_hdr(skb);

	mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
	mgmt_route.destports = BIT(port);
	mgmt_route.enfport = 1;
	mgmt_route.tsreg = 0;
	mgmt_route.takets = takets;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					  slot, &mgmt_route, true);
	if (rc < 0) {
		kfree_skb(skb);
		return rc;
	}

	/* Transfer skb to the host port. */
	dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);

	/* Wait until the switch has processed the frame */
	do {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
						 slot, &mgmt_route);
		if (rc < 0) {
			dev_err_ratelimited(priv->ds->dev,
					    "failed to poll for mgmt route\n");
			continue;
		}

		/* UM10944: The ENFPORT flag of the respective entry is
		 * cleared when a match is found. The host can use this
		 * flag as an acknowledgment.
		 */
		cpu_relax();
	} while (mgmt_route.enfport && --timeout);

	if (!timeout) {
		/* Clean up the management route so that a follow-up
		 * frame may not match on it by mistake.
		 * This is only hardware supported on P/Q/R/S - on E/T it is
		 * a no-op and we are silently discarding the -EOPNOTSUPP.
		 */
		sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					     slot, &mgmt_route, false);
		dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
	}

	return NETDEV_TX_OK;
}

#define work_to_port(work) \
		container_of((work), struct sja1105_port, xmit_work)
#define tagger_to_sja1105(t) \
		container_of((t), struct sja1105_private, tagger_data)

/* Deferred work is unfortunately necessary because setting up the management
 * route cannot be done from atomic context (SPI transfer takes a sleepable
 * lock on the bus)
 */
static void sja1105_port_deferred_xmit(struct kthread_work *work)
{
	struct sja1105_port *sp = work_to_port(work);
	struct sja1105_tagger_data *tagger_data = sp->data;
	struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
	int port = sp - priv->ports;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
		struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;

		/* Serialize against other users of the management route slots */
		mutex_lock(&priv->mgmt_lock);

		sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);

		/* The clone, if there, was made by dsa_skb_tx_timestamp */
		if (clone)
			sja1105_ptp_txtstamp_skb(priv->ds, port, clone);

		mutex_unlock(&priv->mgmt_lock);
	}
}

/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
 * which cannot be reconfigured at runtime. So a switch reset is required.
 */
static int sja1105_set_ageing_time(struct dsa_switch *ds,
				   unsigned int ageing_time)
{
	struct sja1105_l2_lookup_params_entry *l2_lookup_params;
	struct sja1105_private *priv = ds->priv;
	struct sja1105_table *table;
	unsigned int maxage;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
	l2_lookup_params = table->entries;

	maxage = SJA1105_AGEING_TIME_MS(ageing_time);

	if (l2_lookup_params->maxage == maxage)
		return 0;

	l2_lookup_params->maxage = maxage;

	return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
}

/* DSA .port_change_mtu: program the L2 policer MAXLEN for @port. The CPU
 * port gets extra VLAN_HLEN headroom for the tagging header.
 */
static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct sja1105_l2_policing_entry *policing;
	struct sja1105_private *priv = ds->priv;

	/* Convert L3 MTU to the L2 frame length the policer compares against */
	new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;

	if (dsa_is_cpu_port(ds, port))
		new_mtu += VLAN_HLEN;

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	if (policing[port].maxlen == new_mtu)
		return 0;

	policing[port].maxlen = new_mtu;

	return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}

/* Largest supported MTU: 2043-byte frame limit minus L2 overhead */
static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
{
	return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
}

/* DSA .port_setup_tc: dispatch supported qdisc offloads */
static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
				 enum tc_setup_type type,
				 void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_TAPRIO:
		return sja1105_setup_tc_taprio(ds, port, type_data);
	case TC_SETUP_QDISC_CBS:
		return sja1105_setup_tc_cbs(ds, port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* We have a single mirror (@to) port, but can configure ingress and egress
 * mirroring on all other (@from) ports.
 * We need to allow mirroring rules only as long as the @to port is always the
 * same, and we need to unset the @to port from mirr_port only when there is no
 * mirroring rule that references it.
 */
static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
                                bool ingress, bool enabled)
{
        struct sja1105_general_params_entry *general_params;
        struct sja1105_mac_config_entry *mac;
        struct dsa_switch *ds = priv->ds;
        struct sja1105_table *table;
        bool already_enabled;
        u64 new_mirr_port;
        int rc;

        table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
        general_params = table->entries;

        mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

        /* mirr_port == ds->num_ports is the "no mirror port" sentinel */
        already_enabled = (general_params->mirr_port != ds->num_ports);
        if (already_enabled && enabled && general_params->mirr_port != to) {
                dev_err(priv->ds->dev,
                        "Delete mirroring rules towards port %llu first\n",
                        general_params->mirr_port);
                return -EBUSY;
        }

        new_mirr_port = to;
        if (!enabled) {
                bool keep = false;
                int port;

                /* Anybody still referencing mirr_port?
                 * NOTE(review): this scan runs before mac[from] is cleared
                 * below, so when deleting the last mirroring rule, @from
                 * itself still matches and mirr_port appears to stay set.
                 * Confirm whether the mac[from] update should precede this
                 * check.
                 */
                for (port = 0; port < ds->num_ports; port++) {
                        if (mac[port].ing_mirr || mac[port].egr_mirr) {
                                keep = true;
                                break;
                        }
                }
                /* Unset already_enabled for next time */
                if (!keep)
                        new_mirr_port = ds->num_ports;
        }
        if (new_mirr_port != general_params->mirr_port) {
                general_params->mirr_port = new_mirr_port;

                rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
                                                  0, general_params, true);
                if (rc < 0)
                        return rc;
        }

        if (ingress)
                mac[from].ing_mirr = enabled;
        else
                mac[from].egr_mirr = enabled;

        return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
                                            &mac[from], true);
}

/* dsa_switch_ops :: port_mirror_add */
static int sja1105_mirror_add(struct dsa_switch *ds, int port,
                              struct dsa_mall_mirror_tc_entry *mirror,
                              bool ingress)
{
        return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
                                    ingress, true);
}

/* dsa_switch_ops :: port_mirror_del */
static void sja1105_mirror_del(struct dsa_switch *ds, int port,
                               struct dsa_mall_mirror_tc_entry *mirror)
{
        sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
                             mirror->ingress, false);
}

/* dsa_switch_ops :: port_policer_add. Programs the per-port L2 policer with
 * the requested rate and burst; takes effect via a static config reload.
 */
static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
                                    struct dsa_mall_policer_tc_entry *policer)
{
        struct sja1105_l2_policing_entry *policing;
        struct sja1105_private *priv = ds->priv;

        policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

        /* In hardware, every 8 microseconds the credit level is incremented by
         * the value of RATE bytes divided by 64, up to a maximum of SMAX
         * bytes.
         */
        /* rate_bytes_per_sec * 512 / 10^6 converts to the hardware RATE unit
         * described above (bytes per 8 us, in units of 64 bytes).
         */
        policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
                                      1000000);
        policing[port].smax = policer->burst;

        return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}

/* dsa_switch_ops :: port_policer_del. Restores the defaults: 1 Gbps rate
 * and the maximum burst, effectively disabling policing on the port.
 */
static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
{
        struct sja1105_l2_policing_entry *policing;
        struct sja1105_private *priv = ds->priv;

        policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

        policing[port].rate = SJA1105_RATE_MBPS(1000);
        policing[port].smax = 65535;

        sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}

/* Enable or disable hardware address learning on @port, mirroring the
 * state into priv->learn_ena for later consumers of that mask.
 */
static int sja1105_port_set_learning(struct sja1105_private *priv, int port,
                                     bool enabled)
{
        struct sja1105_mac_config_entry *mac;
        int rc;

        mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

        mac[port].dyn_learn = enabled;

        rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
                                          &mac[port], true);
        if (rc)
                return rc;

        if (enabled)
                priv->learn_ena |= BIT(port);
        else
                priv->learn_ena &= ~BIT(port);

        return 0;
}

/* Update the unknown-unicast and broadcast flood masks for egress port @to
 * according to the bridge port flags, then resync the flood domains.
 */
static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to,
                                          struct switchdev_brport_flags flags)
{
        if (flags.mask & BR_FLOOD) {
                if (flags.val & BR_FLOOD)
                        priv->ucast_egress_floods |= BIT(to);
                else
                        priv->ucast_egress_floods &= ~BIT(to);
        }

        if (flags.mask & BR_BCAST_FLOOD) {
                if (flags.val & BR_BCAST_FLOOD)
                        priv->bcast_egress_floods |= BIT(to);
                else
                        priv->bcast_egress_floods &= ~BIT(to);
        }

        return sja1105_manage_flood_domains(priv);
}

/* Toggle unknown-multicast flooding towards @to by editing the destports
 * mask of the catch-all multicast L2 lookup entry installed at setup time.
 */
static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
                                    struct switchdev_brport_flags flags,
                                    struct netlink_ext_ack *extack)
{
        struct sja1105_l2_lookup_entry *l2_lookup;
        struct sja1105_table
 *table;
        int match;

        table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
        l2_lookup = table->entries;

        /* Find the catch-all entry matching SJA1105_UNKNOWN_MULTICAST */
        for (match = 0; match < table->entry_count; match++)
                if (l2_lookup[match].macaddr == SJA1105_UNKNOWN_MULTICAST &&
                    l2_lookup[match].mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
                        break;

        if (match == table->entry_count) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Could not find FDB entry for unknown multicast");
                return -ENOSPC;
        }

        if (flags.val & BR_MCAST_FLOOD)
                l2_lookup[match].destports |= BIT(to);
        else
                l2_lookup[match].destports &= ~BIT(to);

        return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
                                            l2_lookup[match].index,
                                            &l2_lookup[match],
                                            true);
}

/* dsa_switch_ops :: port_pre_bridge_flags. Validates which bridge port
 * flags we can offload; on chips without independent multicast flood
 * control, BR_FLOOD and BR_MCAST_FLOOD must be requested in tandem.
 */
static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
                                         struct switchdev_brport_flags flags,
                                         struct netlink_ext_ack *extack)
{
        struct sja1105_private *priv = ds->priv;

        if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
                           BR_BCAST_FLOOD))
                return -EINVAL;

        if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD) &&
            !priv->info->can_limit_mcast_flood) {
                bool multicast = !!(flags.val & BR_MCAST_FLOOD);
                bool unicast = !!(flags.val & BR_FLOOD);

                if (unicast != multicast) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "This chip cannot configure multicast flooding independently of unicast");
                        return -EINVAL;
                }
        }

        return 0;
}

/* dsa_switch_ops :: port_bridge_flags. Applies the flags validated by
 * sja1105_port_pre_bridge_flags: learning, then unicast/broadcast flood,
 * then (where supported independently) multicast flood.
 */
static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
                                     struct switchdev_brport_flags flags,
                                     struct netlink_ext_ack *extack)
{
        struct sja1105_private *priv = ds->priv;
        int rc;

        if (flags.mask & BR_LEARNING) {
                bool learn_ena = !!(flags.val & BR_LEARNING);

                rc = sja1105_port_set_learning(priv, port, learn_ena);
                if (rc)
                        return rc;
        }

        if (flags.mask & (BR_FLOOD | BR_BCAST_FLOOD)) {
                rc = sja1105_port_ucast_bcast_flood(priv, port, flags);
                if (rc)
                        return rc;
        }

        /* For chips that can't offload BR_MCAST_FLOOD independently, there
         * is nothing to do here, we ensured the configuration is in sync by
         * offloading BR_FLOOD.
         */
        if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) {
                rc = sja1105_port_mcast_flood(priv, port, flags,
                                              extack);
                if (rc)
                        return rc;
        }

        return 0;
}

/* DSA callbacks implemented by this driver */
static const struct dsa_switch_ops sja1105_switch_ops = {
        .get_tag_protocol       = sja1105_get_tag_protocol,
        .setup                  = sja1105_setup,
        .teardown               = sja1105_teardown,
        .set_ageing_time        = sja1105_set_ageing_time,
        .port_change_mtu        = sja1105_change_mtu,
        .port_max_mtu           = sja1105_get_max_mtu,
        .phylink_validate       = sja1105_phylink_validate,
        .phylink_mac_link_state = sja1105_mac_pcs_get_state,
        .phylink_mac_config     = sja1105_mac_config,
        .phylink_mac_link_up    = sja1105_mac_link_up,
        .phylink_mac_link_down  = sja1105_mac_link_down,
        .get_strings            = sja1105_get_strings,
        .get_ethtool_stats      = sja1105_get_ethtool_stats,
        .get_sset_count         = sja1105_get_sset_count,
        .get_ts_info            = sja1105_get_ts_info,
        .port_disable           = sja1105_port_disable,
        .port_fdb_dump          = sja1105_fdb_dump,
        .port_fdb_add           = sja1105_fdb_add,
        .port_fdb_del           = sja1105_fdb_del,
        .port_bridge_join       = sja1105_bridge_join,
        .port_bridge_leave      = sja1105_bridge_leave,
        .port_pre_bridge_flags  = sja1105_port_pre_bridge_flags,
        .port_bridge_flags      = sja1105_port_bridge_flags,
        .port_stp_state_set     = sja1105_bridge_stp_state_set,
        .port_vlan_filtering    = sja1105_vlan_filtering,
        .port_vlan_add          = sja1105_vlan_add,
        .port_vlan_del          = sja1105_vlan_del,
        .port_mdb_add           = sja1105_mdb_add,
        .port_mdb_del           = sja1105_mdb_del,
        .port_hwtstamp_get      = sja1105_hwtstamp_get,
        .port_hwtstamp_set      = sja1105_hwtstamp_set,
        .port_rxtstamp          = sja1105_port_rxtstamp,
        .port_txtstamp          = sja1105_port_txtstamp,
        .port_setup_tc          = sja1105_port_setup_tc,
        .port_mirror_add        = sja1105_mirror_add,
        .port_mirror_del        = sja1105_mirror_del,
        .port_policer_add       = sja1105_port_policer_add,
        .port_policer_del       = sja1105_port_policer_del,
        .cls_flower_add         = sja1105_cls_flower_add,
        .cls_flower_del         = sja1105_cls_flower_del,
        .cls_flower_stats       = sja1105_cls_flower_stats,
        .crosschip_bridge_join  = sja1105_crosschip_bridge_join,
        .crosschip_bridge_leave = sja1105_crosschip_bridge_leave,
        .devlink_param_get      = sja1105_devlink_param_get,
        .devlink_param_set      = sja1105_devlink_param_set,
        .devlink_info_get       = sja1105_devlink_info_get,
};

static const struct of_device_id sja1105_dt_ids[];

/* Read the DEVICE_ID and PROD_ID registers over SPI and cross-check them
 * against the match table; if the device tree named a different chip than
 * what is actually on the bus, warn and adopt the detected chip's info.
 */
static int sja1105_check_device_id(struct sja1105_private *priv)
{
        const struct sja1105_regs *regs = priv->info->regs;
        u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
        struct device *dev = &priv->spidev->dev;
        const struct of_device_id *match;
        u32 device_id;
        u64 part_no;
        int rc;

        rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
                              NULL);
        if (rc < 0)
                return rc;

        rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
                              SJA1105_SIZE_DEVICE_ID);
        if (rc < 0)
                return rc;

        /* PART_NO is a 16-bit field at bits 19:4 of PROD_ID */
        sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);

        for (match = sja1105_dt_ids; match->compatible[0]; match++) {
                const struct sja1105_info *info = match->data;

                /* Is what's been probed in our match table at all? */
                if (info->device_id != device_id || info->part_no != part_no)
                        continue;

                /* But is it what's in the device tree? */
                if (priv->info->device_id != device_id ||
                    priv->info->part_no != part_no) {
                        dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n",
                                 priv->info->name, info->name);
                        /* It isn't.
                         * No problem, pick that up. */
                        priv->info = info;
                }

                return 0;
        }

        dev_err(dev, "Unexpected {device ID, part number}: 0x%x 0x%llx\n",
                device_id, part_no);

        return -ENODEV;
}

/* SPI probe: reset the chip (if a reset GPIO exists), validate the SPI
 * controller's transfer limits, detect the exact chip variant, register the
 * DSA switch, and create the per-port deferred xmit workers.
 */
static int sja1105_probe(struct spi_device *spi)
{
        struct sja1105_tagger_data *tagger_data;
        struct device *dev = &spi->dev;
        struct sja1105_private *priv;
        size_t max_xfer, max_msg;
        struct dsa_switch *ds;
        int rc, port;

        if (!dev->of_node) {
                dev_err(dev, "No DTS bindings for SJA1105 driver\n");
                return -EINVAL;
        }

        priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* Configure the optional reset pin and bring up switch.
         * NOTE(review): every gpiod error (including -EPROBE_DEFER) is
         * treated as "no reset pin"; devm_gpiod_get_optional() plus error
         * propagation would distinguish the two cases - confirm intent.
         */
        priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(priv->reset_gpio))
                dev_dbg(dev, "reset-gpios not defined, ignoring\n");
        else
                sja1105_hw_reset(priv->reset_gpio, 1, 1);

        /* Populate our driver private structure (priv) based on
         * the device tree node that was probed (spi)
         */
        priv->spidev = spi;
        spi_set_drvdata(spi, priv);

        /* Configure the SPI bus */
        spi->bits_per_word = 8;
        rc = spi_setup(spi);
        if (rc < 0) {
                dev_err(dev, "Could not init SPI\n");
                return rc;
        }

        /* In sja1105_xfer, we send spi_messages composed of two spi_transfers:
         * a small one for the message header and another one for the current
         * chunk of the packed buffer.
         * Check that the restrictions imposed by the SPI controller are
         * respected: the chunk buffer is smaller than the max transfer size,
         * and the total length of the chunk plus its message header is smaller
         * than the max message size.
         * We do that during probe time since the maximum transfer size is a
         * runtime invariant.
         */
        max_xfer = spi_max_transfer_size(spi);
        max_msg = spi_max_message_size(spi);

        /* We need to send at least one 64-bit word of SPI payload per message
         * in order to be able to make useful progress.
         */
        if (max_msg < SJA1105_SIZE_SPI_MSG_HEADER + 8) {
                dev_err(dev, "SPI master cannot send large enough buffers, aborting\n");
                return -EINVAL;
        }

        priv->max_xfer_len = SJA1105_SIZE_SPI_MSG_MAXLEN;
        if (priv->max_xfer_len > max_xfer)
                priv->max_xfer_len = max_xfer;
        if (priv->max_xfer_len > max_msg - SJA1105_SIZE_SPI_MSG_HEADER)
                priv->max_xfer_len = max_msg - SJA1105_SIZE_SPI_MSG_HEADER;

        priv->info = of_device_get_match_data(dev);

        /* Detect hardware device */
        rc = sja1105_check_device_id(priv);
        if (rc < 0) {
                dev_err(dev, "Device ID check failed: %d\n", rc);
                return rc;
        }

        dev_info(dev, "Probed switch chip: %s\n", priv->info->name);

        ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
        if (!ds)
                return -ENOMEM;

        ds->dev = dev;
        ds->num_ports = SJA1105_MAX_NUM_PORTS;
        ds->ops = &sja1105_switch_ops;
        ds->priv = priv;
        priv->ds = ds;

        tagger_data = &priv->tagger_data;

        mutex_init(&priv->ptp_data.lock);
        mutex_init(&priv->mgmt_lock);

        priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx),
                                           GFP_KERNEL);
        if (!priv->dsa_8021q_ctx)
                return -ENOMEM;

        priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops;
        priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q);
        priv->dsa_8021q_ctx->ds = ds;

        INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);
        INIT_LIST_HEAD(&priv->bridge_vlans);
        INIT_LIST_HEAD(&priv->dsa_8021q_vlans);

        sja1105_tas_setup(ds);
        sja1105_flower_setup(ds);

        rc = dsa_register_switch(priv->ds);
        if (rc)
                return rc;

        if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
                priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
                                         sizeof(struct sja1105_cbs_entry),
                                         GFP_KERNEL);
                if (!priv->cbs) {
                        rc = -ENOMEM;
                        goto out_unregister_switch;
                }
        }

        /* Connections between dsa_port and sja1105_port.
         * NOTE(review): the switch is already registered at this point but
         * the xmit workers are only created below; confirm that no deferred
         * xmit can be triggered in this window.
         */
        for (port = 0; port < ds->num_ports; port++) {
                struct sja1105_port *sp = &priv->ports[port];
                struct dsa_port *dp = dsa_to_port(ds, port);
                struct net_device *slave;
                int subvlan;

                if (!dsa_is_user_port(ds, port))
                        continue;

                dp->priv = sp;
                sp->dp = dp;
                sp->data = tagger_data;
                slave = dp->slave;
                kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
                sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
                                                        slave->name);
                if (IS_ERR(sp->xmit_worker)) {
                        rc = PTR_ERR(sp->xmit_worker);
                        dev_err(ds->dev,
                                "failed to create deferred xmit thread: %d\n",
                                rc);
                        goto out_destroy_workers;
                }
                skb_queue_head_init(&sp->xmit_queue);
                sp->xmit_tpid = ETH_P_SJA1105;

                for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
                        sp->subvlan_map[subvlan] = VLAN_N_VID;
        }

        return 0;

out_destroy_workers:
        /* Destroy only the workers created so far (ports below the failed one) */
        while (port-- > 0) {
                struct sja1105_port *sp = &priv->ports[port];

                if (!dsa_is_user_port(ds, port))
                        continue;

                kthread_destroy_worker(sp->xmit_worker);
        }

out_unregister_switch:
        dsa_unregister_switch(ds);

        return rc;
}

/* SPI remove: dsa_unregister_switch() invokes sja1105_teardown(), which
 * destroys the xmit workers and frees the remaining driver state.
 */
static int sja1105_remove(struct spi_device *spi)
{
        struct sja1105_private *priv = spi_get_drvdata(spi);

        dsa_unregister_switch(priv->ds);
        return 0;
}

static const struct of_device_id sja1105_dt_ids[] = {
        { .compatible = "nxp,sja1105e", .data = &sja1105e_info },
        { .compatible = "nxp,sja1105t", .data = &sja1105t_info },
        { .compatible = "nxp,sja1105p", .data = &sja1105p_info },
        { .compatible = "nxp,sja1105q", .data = &sja1105q_info },
        { .compatible = "nxp,sja1105r", .data = &sja1105r_info },
        { .compatible = "nxp,sja1105s", .data = &sja1105s_info },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);

static struct spi_driver sja1105_driver = {
        .driver = {
                .name  = "sja1105",
                /* NOTE(review): .owner is also set by the SPI core via
                 * module_spi_driver(); this assignment is redundant.
                 */
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(sja1105_dt_ids),
        },
        .probe  = sja1105_probe,
        .remove = sja1105_remove,
};

module_spi_driver(sja1105_driver);

MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
MODULE_DESCRIPTION("SJA1105 Driver");
MODULE_LICENSE("GPL v2");