// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/spi/spi.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/of_device.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/dsa/8021q.h>
#include "sja1105.h"
#include "sja1105_sgmii.h"
#include "sja1105_tas.h"

#define SJA1105_UNKNOWN_MULTICAST	0x010000000000ull
#define SJA1105_DEFAULT_VLAN		(VLAN_N_VID - 1)

static const struct dsa_switch_ops sja1105_switch_ops;

static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
			     unsigned int startup_delay)
{
	gpiod_set_value_cansleep(gpio, 1);
	/* Wait for minimum reset pulse length */
	msleep(pulse_len);
	gpiod_set_value_cansleep(gpio, 0);
	/* Wait until chip is ready after reset */
	msleep(startup_delay);
}

static void
sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
			   int from, int to, bool allow)
{
	if (allow)
		l2_fwd[from].reach_port |= BIT(to);
	else
		l2_fwd[from].reach_port &= ~BIT(to);
}

static bool sja1105_can_forward(struct sja1105_l2_forwarding_entry *l2_fwd,
				int from, int to)
{
	return !!(l2_fwd[from].reach_port & BIT(to));
}

/* Structure used to temporarily transport device tree
 * settings into sja1105_setup
 */
struct sja1105_dt_port {
	phy_interface_t phy_mode;
	sja1105_mii_role_t role;
};

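/* For reference, the egress queue partitioning programmed in
 * sja1105_init_mac_settings() below: queue i covers frame buffer slots
 * base[i]..top[i], so queue 0 spans slots 0x000-0x03F and queue 7 spans
 * 0x1C0-0x1FF - eight queues of 64 frames each, covering slots 0 through
 * 511.
 */
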
static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry default_mac = {
		/* Enable all 8 priority queues on egress.
		 * Every queue i holds top[i] - base[i] frames.
		 * Sum of top[i] - base[i] is 511 (max hardware limit).
		 */
		.top  = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
		.base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
		.enabled = {true, true, true, true, true, true, true, true},
		/* Keep standard IFG of 12 bytes on egress. */
		.ifg = 0,
		/* Always put the MAC speed in automatic mode, where it can be
		 * adjusted at runtime by PHYLINK.
		 */
		.speed = priv->info->port_speed[SJA1105_SPEED_AUTO],
		/* No static correction for 1-step 1588 events */
		.tp_delin = 0,
		.tp_delout = 0,
		/* Disable aging for critical TTEthernet traffic */
		.maxage = 0xFF,
		/* Internal VLAN (pvid) to apply to untagged ingress */
		.vlanprio = 0,
		.vlanid = 1,
		.ing_mirr = false,
		.egr_mirr = false,
		/* Don't drop traffic with other EtherType than ETH_P_IP */
		.drpnona664 = false,
		/* Don't drop double-tagged traffic */
		.drpdtag = false,
		/* Don't drop untagged traffic */
		.drpuntag = false,
		/* Don't retag 802.1p (VID 0) traffic with the pvid */
		.retag = false,
		/* Disable learning and I/O on user ports by default -
		 * STP will enable it.
		 */
		.dyn_learn = false,
		.egress = false,
		.ingress = false,
	};
	struct sja1105_mac_config_entry *mac;
	struct dsa_switch *ds = priv->ds;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];

	/* Discard previous MAC Configuration Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(table->ops->max_entry_count,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = table->ops->max_entry_count;

	mac = table->entries;

	for (i = 0; i < ds->num_ports; i++) {
		mac[i] = default_mac;
		if (i == dsa_upstream_port(priv->ds, i)) {
			/* STP doesn't get called for CPU port, so we need to
			 * set the I/O parameters statically.
			 */
			mac[i].dyn_learn = true;
			mac[i].ingress = true;
			mac[i].egress = true;
		}
	}

	return 0;
}

static int sja1105_init_mii_settings(struct sja1105_private *priv,
				     struct sja1105_dt_port *ports)
{
	struct device *dev = &priv->spidev->dev;
	struct sja1105_xmii_params_entry *mii;
	struct dsa_switch *ds = priv->ds;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];

	/* Discard previous xMII Mode Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(table->ops->max_entry_count,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	/* Override table based on PHYLINK DT bindings */
	table->entry_count = table->ops->max_entry_count;

	mii = table->entries;

	for (i = 0; i < ds->num_ports; i++) {
		if (dsa_is_unused_port(priv->ds, i))
			continue;

		switch (ports[i].phy_mode) {
		case PHY_INTERFACE_MODE_MII:
			if (!priv->info->supports_mii[i])
				goto unsupported;

			mii->xmii_mode[i] = XMII_MODE_MII;
			break;
		case PHY_INTERFACE_MODE_RMII:
			if (!priv->info->supports_rmii[i])
				goto unsupported;

			mii->xmii_mode[i] = XMII_MODE_RMII;
			break;
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
			if (!priv->info->supports_rgmii[i])
				goto unsupported;

			mii->xmii_mode[i] = XMII_MODE_RGMII;
			break;
		case PHY_INTERFACE_MODE_SGMII:
			if (!priv->info->supports_sgmii[i])
				goto unsupported;

			mii->xmii_mode[i] = XMII_MODE_SGMII;
			break;
		case PHY_INTERFACE_MODE_2500BASEX:
			if (!priv->info->supports_2500basex[i])
				goto unsupported;

			mii->xmii_mode[i] = XMII_MODE_SGMII;
			break;
unsupported:
		default:
			dev_err(dev, "Unsupported PHY mode %s on port %d!\n",
				phy_modes(ports[i].phy_mode), i);
			return -EINVAL;
		}

		/* Even though the SerDes port is able to drive SGMII autoneg
		 * like a PHY would, from the perspective of the XMII tables,
		 * the SGMII port should always be put in MAC mode.
		 * Similarly, RGMII is a symmetric protocol electrically
		 * speaking, and the 'RGMII PHY' role does not mean anything to
		 * hardware. Just keep the 'PHY role' notation relevant to the
		 * driver to mean 'the switch port should apply RGMII delays',
		 * but unconditionally put the port in the MAC role.
		 */
		if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII ||
		    phy_interface_mode_is_rgmii(ports[i].phy_mode))
			mii->phy_mac[i] = XMII_MAC;
		else
			mii->phy_mac[i] = ports[i].role;
	}
	return 0;
}

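/* For reference: SJA1105_UNKNOWN_MULTICAST (0x010000000000), used below both
 * as the match value and as the match mask, singles out one bit - the
 * multicast (I/G) bit in the first octet of the DMAC - so the catch-all
 * entry matches any frame with a multicast destination address.
 */
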
static int sja1105_init_static_fdb(struct sja1105_private *priv)
{
	struct sja1105_l2_lookup_entry *l2_lookup;
	struct sja1105_table *table;
	int port;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];

	/* We only populate the FDB table through dynamic L2 Address Lookup
	 * entries, except for a special entry at the end which is a catch-all
	 * for unknown multicast and will be used to control flooding domain.
	 */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	if (!priv->info->can_limit_mcast_flood)
		return 0;

	table->entries = kcalloc(1, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = 1;
	l2_lookup = table->entries;

	/* All L2 multicast addresses have an odd first octet */
	l2_lookup[0].macaddr = SJA1105_UNKNOWN_MULTICAST;
	l2_lookup[0].mask_macaddr = SJA1105_UNKNOWN_MULTICAST;
	l2_lookup[0].lockeds = true;
	l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1;

	/* Flood multicast to every port by default */
	for (port = 0; port < priv->ds->num_ports; port++)
		if (!dsa_is_unused_port(priv->ds, port))
			l2_lookup[0].destports |= BIT(port);

	return 0;
}

static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
{
	struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
		/* Learned FDB entries are forgotten after 300 seconds */
		.maxage = SJA1105_AGEING_TIME_MS(300000),
		/* All entries within a FDB bin are available for learning */
		.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
		/* And the P/Q/R/S equivalent setting: */
		.start_dynspc = 0,
		/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
		.poly = 0x97,
		/* This selects between Independent VLAN Learning (IVL) and
		 * Shared VLAN Learning (SVL)
		 */
		.shared_learn = true,
		/* Don't discard management traffic based on ENFPORT -
		 * we don't perform SMAC port enforcement anyway, so
		 * what we are setting here doesn't matter.
		 */
		.no_enf_hostprt = false,
		/* Don't learn SMAC for mac_fltres1 and mac_fltres0.
		 * Maybe correlate with no_linklocal_learn from bridge driver?
		 */
		.no_mgmt_learn = true,
		/* P/Q/R/S only */
		.use_static = true,
		/* Dynamically learned FDB entries can overwrite other (older)
		 * dynamic FDB entries
		 */
		.owr_dyn = true,
		.drpnolearn = true,
	};
	struct dsa_switch *ds = priv->ds;
	int port, num_used_ports = 0;
	struct sja1105_table *table;
	u64 max_fdb_entries;

	for (port = 0; port < ds->num_ports; port++)
		if (!dsa_is_unused_port(ds, port))
			num_used_ports++;

	max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / num_used_ports;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		default_l2_lookup_params.maxaddrp[port] = max_fdb_entries;
	}

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(table->ops->max_entry_count,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = table->ops->max_entry_count;

	/* This table only has a single entry */
	((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
				default_l2_lookup_params;

	return 0;
}

/* Set up a default VLAN for untagged traffic injected from the CPU
 * using management routes (e.g. STP, PTP) as opposed to tag_8021q.
 * All DT-defined ports are members of this VLAN, and there are no
 * restrictions on forwarding (since the CPU selects the destination).
 * Frames from this VLAN will always be transmitted as untagged, and
 * neither the bridge nor the 8021q module can create this VLAN ID.
 */
static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	struct sja1105_vlan_lookup_entry pvid = {
		.ving_mirr = 0,
		.vegr_mirr = 0,
		.vmemb_port = 0,
		.vlan_bc = 0,
		.tag_port = 0,
		.vlanid = SJA1105_DEFAULT_VLAN,
	};
	struct dsa_switch *ds = priv->ds;
	int port;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kzalloc(table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = 1;

	for (port = 0; port < ds->num_ports; port++) {
		struct sja1105_bridge_vlan *v;

		if (dsa_is_unused_port(ds, port))
			continue;

		pvid.vmemb_port |= BIT(port);
		pvid.vlan_bc |= BIT(port);
		pvid.tag_port &= ~BIT(port);

		v = kzalloc(sizeof(*v), GFP_KERNEL);
		if (!v)
			return -ENOMEM;

		v->port = port;
		v->vid = SJA1105_DEFAULT_VLAN;
		v->untagged = true;
		if (dsa_is_cpu_port(ds, port))
			v->pvid = true;
		list_add(&v->list, &priv->dsa_8021q_vlans);
	}

	((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
	return 0;
}

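/* For reference, the layout of the L2 Forwarding Table populated below: the
 * first 'ds->num_ports' entries are indexed by ingress port and hold the
 * reachability, broadcast and flooding domains, while the final
 * SJA1105_NUM_TC entries are indexed by priority and hold the per-port
 * mapping of VLAN PCP to egress queue (a one-to-one mapping here).
 */
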
static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_entry *l2fwd;
	struct dsa_switch *ds = priv->ds;
	struct sja1105_table *table;
	int i, j;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(table->ops->max_entry_count,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = table->ops->max_entry_count;

	l2fwd = table->entries;

	/* First 'ds->num_ports' entries define the forwarding rules */
	for (i = 0; i < ds->num_ports; i++) {
		unsigned int upstream = dsa_upstream_port(priv->ds, i);

		if (dsa_is_unused_port(ds, i))
			continue;

		for (j = 0; j < SJA1105_NUM_TC; j++)
			l2fwd[i].vlan_pmap[j] = j;

		/* All ports start up with egress flooding enabled,
		 * including the CPU port.
		 */
		priv->ucast_egress_floods |= BIT(i);
		priv->bcast_egress_floods |= BIT(i);

		if (i == upstream)
			continue;

		sja1105_port_allow_traffic(l2fwd, i, upstream, true);
		sja1105_port_allow_traffic(l2fwd, upstream, i, true);

		l2fwd[i].bc_domain = BIT(upstream);
		l2fwd[i].fl_domain = BIT(upstream);

		l2fwd[upstream].bc_domain |= BIT(i);
		l2fwd[upstream].fl_domain |= BIT(i);
	}

	/* Next 8 entries define VLAN PCP mapping from ingress to egress.
	 * Create a one-to-one mapping.
	 */
	for (i = 0; i < SJA1105_NUM_TC; i++) {
		for (j = 0; j < ds->num_ports; j++) {
			if (dsa_is_unused_port(ds, j))
				continue;

			l2fwd[ds->num_ports + i].vlan_pmap[j] = i;
		}
	}

	return 0;
}

static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_params_entry *l2fwd_params;
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(table->ops->max_entry_count,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = table->ops->max_entry_count;

	/* This table only has a single entry */
	l2fwd_params = table->entries;

	/* Disallow dynamic reconfiguration of vlan_pmap */
	l2fwd_params->max_dynp = 0;
	/* Use a single memory partition for all ingress queues */
	l2fwd_params->part_spc[0] = priv->info->max_frame_mem;

	return 0;
}

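/* Worked example for the partitioning below, assuming a first-generation
 * part where priv->info->max_frame_mem is 929 blocks of 128 bytes (the
 * figure that the "829 blocks" comment below also assumes): retagging, if
 * enabled, sets aside SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD blocks first,
 * and virtual links then carve out SJA1105_VL_FRAME_MEMORY (100) blocks,
 * leaving 829 for best-effort traffic.
 */
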
void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
	struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
	int max_mem = priv->info->max_frame_mem;
	struct sja1105_table *table;

	/* VLAN retagging is implemented using a loopback port that consumes
	 * frame buffers. That leaves less for us.
	 */
	if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
		max_mem -= SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
	l2_fwd_params = table->entries;
	l2_fwd_params->part_spc[0] = max_mem;

	/* If we have any critical-traffic virtual links, we need to reserve
	 * some frame buffer memory for them. At the moment, hardcode the value
	 * at 100 blocks of 128 bytes of memory each. This leaves 829 blocks
	 * remaining for best-effort traffic. TODO: figure out a more flexible
	 * way to perform the frame buffer partitioning.
	 */
	if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count)
		return;

	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
	vl_fwd_params = table->entries;

	l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY;
	vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
}

static int sja1105_init_general_params(struct sja1105_private *priv)
{
	struct sja1105_general_params_entry default_general_params = {
		/* Allow dynamic changing of the mirror port */
		.mirr_ptacu = true,
		.switchid = priv->ds->index,
		/* Priority queue for link-local management frames
		 * (both ingress to and egress from CPU - PTP, STP etc)
		 */
		.hostprio = 7,
		.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
		.mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
		.incl_srcpt1 = false,
		.send_meta1 = false,
		.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
		.mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
		.incl_srcpt0 = false,
		.send_meta0 = false,
		/* The destination for traffic matching mac_fltres1 and
		 * mac_fltres0 on all ports except host_port. Such traffic
		 * received on host_port itself would be dropped, except
		 * by installing a temporary 'management route'
		 */
		.host_port = priv->ds->num_ports,
		/* Default to an invalid value */
		.mirr_port = priv->ds->num_ports,
		/* Link-local traffic received on casc_port will be forwarded
		 * to host_port without embedding the source port and device ID
		 * info in the destination MAC address (presumably because it
		 * is a cascaded port and a downstream SJA switch already did
		 * that). Default to an invalid port (to disable the feature)
		 * and overwrite this if we find any DSA (cascaded) ports.
		 */
		.casc_port = priv->ds->num_ports,
		/* No TTEthernet */
		.vllupformat = SJA1105_VL_FORMAT_PSFP,
		.vlmarker = 0,
		.vlmask = 0,
		/* Only update correctionField for 1-step PTP (L2 transport) */
		.ignore2stf = 0,
		/* Forcefully disable VLAN filtering by telling
		 * the switch that VLAN has a different EtherType.
		 */
		.tpid = ETH_P_SJA1105,
		.tpid2 = ETH_P_SJA1105,
	};
	struct dsa_switch *ds = priv->ds;
	struct sja1105_table *table;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_cpu_port(ds, port)) {
			default_general_params.host_port = port;
			break;
		}
	}

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(table->ops->max_entry_count,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = table->ops->max_entry_count;

	/* This table only has a single entry */
	((struct sja1105_general_params_entry *)table->entries)[0] =
				default_general_params;

	return 0;
}

static int sja1105_init_avb_params(struct sja1105_private *priv)
{
	struct sja1105_avb_params_entry *avb;
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];

	/* Discard previous AVB Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(table->ops->max_entry_count,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = table->ops->max_entry_count;

	avb = table->entries;

	/* Configure the MAC addresses for meta frames */
	avb->destmeta = SJA1105_META_DMAC;
	avb->srcmeta = SJA1105_META_SMAC;
	/* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by
	 * default. This is because there might be boards with a hardware
	 * layout where enabling the pin as output might cause an electrical
	 * clash. On E/T the pin is always an output, which the board designers
	 * probably already knew, so even if there are going to be electrical
	 * issues, there's nothing we can do.
	 */
	avb->cas_master = false;

	return 0;
}

/* The L2 policing table is 2-stage. The table is looked up for each frame
 * according to the ingress port, whether it was broadcast or not, and the
 * classified traffic class (given by VLAN PCP). This portion of the lookup is
 * fixed, and gives access to the SHARINDX, an indirection register pointing
 * within the policing table itself, which is used to resolve the policer that
 * will be used for this frame.
 *
 *  Stage 1                              Stage 2
 * +------------+--------+              +---------------------------------+
 * |Port 0 TC 0 |SHARINDX|              | Policer 0: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 * |Port 0 TC 1 |SHARINDX|              | Policer 1: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 *  ...                                 | Policer 2: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 * |Port 0 TC 7 |SHARINDX|              | Policer 3: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 * |Port 1 TC 0 |SHARINDX|              | Policer 4: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 *  ...                                 | Policer 5: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 * |Port 1 TC 7 |SHARINDX|              | Policer 6: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 *  ...                                 | Policer 7: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 * |Port 4 TC 7 |SHARINDX|               ...
 * +------------+--------+
 * |Port 0 BCAST|SHARINDX|               ...
 * +------------+--------+
 * |Port 1 BCAST|SHARINDX|               ...
 * +------------+--------+
 *  ...                                  ...
 * +------------+--------+              +---------------------------------+
 * |Port 4 BCAST|SHARINDX|              | Policer 44: Rate, Burst, MTU    |
 * +------------+--------+              +---------------------------------+
 *
 * In this driver, we shall use policers 0-4 as statically allocated port
 * (matchall) policers. So we need to make the SHARINDX for all lookups
 * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast
 * lookup) equal.
 * The remaining policers (40) shall be dynamically allocated for flower
 * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff.
 */

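/* For reference: SJA1105_RATE_MBPS(1000) evaluates to 64000, i.e. one unit
 * of the hardware RATE field corresponds to 1000 Mbps / 64000 = 15.625 kbps.
 */
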
#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)

static int sja1105_init_l2_policing(struct sja1105_private *priv)
{
	struct sja1105_l2_policing_entry *policing;
	struct dsa_switch *ds = priv->ds;
	struct sja1105_table *table;
	int port, tc;

	table = &priv->static_config.tables[BLK_IDX_L2_POLICING];

	/* Discard previous L2 Policing Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(table->ops->max_entry_count,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = table->ops->max_entry_count;

	policing = table->entries;

	/* Setup shared indices for the matchall policers */
	for (port = 0; port < ds->num_ports; port++) {
		int mcast = (ds->num_ports * (SJA1105_NUM_TC + 1)) + port;
		int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;

		for (tc = 0; tc < SJA1105_NUM_TC; tc++)
			policing[port * SJA1105_NUM_TC + tc].sharindx = port;

		policing[bcast].sharindx = port;
		/* Only SJA1110 has multicast policers */
		if (mcast < table->ops->max_entry_count)
			policing[mcast].sharindx = port;
	}

	/* Setup the matchall policer parameters */
	for (port = 0; port < ds->num_ports; port++) {
		int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;

		if (dsa_is_cpu_port(priv->ds, port))
			mtu += VLAN_HLEN;

		policing[port].smax = 65535; /* Burst size in bytes */
		policing[port].rate = SJA1105_RATE_MBPS(1000);
		policing[port].maxlen = mtu;
		policing[port].partition = 0;
	}

	return 0;
}

static int sja1105_static_config_load(struct sja1105_private *priv,
				      struct sja1105_dt_port *ports)
{
	int rc;

	sja1105_static_config_free(&priv->static_config);
	rc = sja1105_static_config_init(&priv->static_config,
					priv->info->static_ops,
					priv->info->device_id);
	if (rc)
		return rc;

	/* Build static configuration */
	rc = sja1105_init_mac_settings(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_mii_settings(priv, ports);
	if (rc < 0)
		return rc;
	rc = sja1105_init_static_fdb(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_static_vlan(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_lookup_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_forwarding(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_forwarding_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_policing(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_general_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_avb_params(priv);
	if (rc < 0)
		return rc;

	/* Send initial configuration to hardware via SPI */
	return sja1105_static_config_upload(priv);
}

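/* Note on the XMII_MAC early exit below: per the comment in
 * sja1105_init_mii_settings(), the PHY role means "the switch port should
 * apply RGMII delays", so for ports in the MAC role the delay flags are
 * left unset and any internal delay is presumably handled by the attached
 * PHY.
 */
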
static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
				      const struct sja1105_dt_port *ports)
{
	struct dsa_switch *ds = priv->ds;
	int i;

	for (i = 0; i < ds->num_ports; i++) {
		if (ports[i].role == XMII_MAC)
			continue;

		if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
		    ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
			priv->rgmii_rx_delay[i] = true;

		if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
		    ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
			priv->rgmii_tx_delay[i] = true;

		if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
		    !priv->info->setup_rgmii_delay)
			return -EINVAL;
	}
	return 0;
}

static int sja1105_parse_ports_node(struct sja1105_private *priv,
				    struct sja1105_dt_port *ports,
				    struct device_node *ports_node)
{
	struct device *dev = &priv->spidev->dev;
	struct device_node *child;

	for_each_available_child_of_node(ports_node, child) {
		struct device_node *phy_node;
		phy_interface_t phy_mode;
		u32 index;
		int err;

		/* Get switch port number from DT */
		if (of_property_read_u32(child, "reg", &index) < 0) {
			dev_err(dev, "Port number not defined in device tree "
				"(property \"reg\")\n");
			of_node_put(child);
			return -ENODEV;
		}

		/* Get PHY mode from DT */
		err = of_get_phy_mode(child, &phy_mode);
		if (err) {
			dev_err(dev, "Failed to read phy-mode or "
				"phy-interface-type property for port %d\n",
				index);
			of_node_put(child);
			return -ENODEV;
		}
		ports[index].phy_mode = phy_mode;

		phy_node = of_parse_phandle(child, "phy-handle", 0);
		if (!phy_node) {
			if (!of_phy_is_fixed_link(child)) {
				dev_err(dev, "phy-handle or fixed-link "
					"properties missing!\n");
				of_node_put(child);
				return -ENODEV;
			}
			/* phy-handle is missing, but fixed-link isn't.
			 * So it's a fixed link. Default to PHY role.
			 */
			ports[index].role = XMII_PHY;
		} else {
			/* phy-handle present => put port in MAC role */
			ports[index].role = XMII_MAC;
			of_node_put(phy_node);
		}

		/* The MAC/PHY role can be overridden with explicit bindings */
		if (of_property_read_bool(child, "sja1105,role-mac"))
			ports[index].role = XMII_MAC;
		else if (of_property_read_bool(child, "sja1105,role-phy"))
			ports[index].role = XMII_PHY;

		priv->phy_mode[index] = phy_mode;
	}

	return 0;
}

static int sja1105_parse_dt(struct sja1105_private *priv,
			    struct sja1105_dt_port *ports)
{
	struct device *dev = &priv->spidev->dev;
	struct device_node *switch_node = dev->of_node;
	struct device_node *ports_node;
	int rc;

	ports_node = of_get_child_by_name(switch_node, "ports");
	if (!ports_node)
		ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
	if (!ports_node) {
		dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
		return -ENODEV;
	}

	rc = sja1105_parse_ports_node(priv, ports, ports_node);
	of_node_put(ports_node);

	return rc;
}

static int sja1105_sgmii_read(struct sja1105_private *priv, int port, int mmd,
			      int pcs_reg)
{
	u64 addr = (mmd << 16) | pcs_reg;
	u32 val;
	int rc;

	if (port != SJA1105_SGMII_PORT)
		return -ENODEV;

	rc = sja1105_xfer_u32(priv, SPI_READ, addr, &val, NULL);
	if (rc < 0)
		return rc;

	return val;
}

static int sja1105_sgmii_write(struct sja1105_private *priv, int port, int mmd,
			       int pcs_reg, u16 pcs_val)
{
	u64 addr = (mmd << 16) | pcs_reg;
	u32 val = pcs_val;
	int rc;

	if (port != SJA1105_SGMII_PORT)
		return -ENODEV;

	rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &val, NULL);
	if (rc < 0)
		return rc;

	return val;
}

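/* For reference, the PCS register addressing used above packs the MMD into
 * bits 31:16 and the register offset into bits 15:0, so e.g. a read of
 * SJA1105_AIS from MDIO_MMD_VEND2 (31) targets address
 * (31 << 16) | SJA1105_AIS.
 */
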
static void sja1105_sgmii_pcs_config(struct sja1105_private *priv, int port,
				     bool an_enabled, bool an_master)
{
	u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII;

	/* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to
	 * stop the clock during LPI mode, make the MAC reconfigure
	 * autonomously after PCS autoneg is done, flush the internal FIFOs.
	 */
	sja1105_sgmii_write(priv, port, MDIO_MMD_VEND2, SJA1105_DC1,
			    SJA1105_DC1_EN_VSMMD1 |
			    SJA1105_DC1_CLOCK_STOP_EN |
			    SJA1105_DC1_MAC_AUTO_SW |
			    SJA1105_DC1_INIT);
	/* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */
	sja1105_sgmii_write(priv, port, MDIO_MMD_VEND2, SJA1105_DC2,
			    SJA1105_DC2_TX_POL_INV_DISABLE);
	/* AUTONEG_CONTROL: Use SGMII autoneg */
	if (an_master)
		ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK;
	sja1105_sgmii_write(priv, port, MDIO_MMD_VEND2, SJA1105_AC, ac);
	/* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise,
	 * sja1105_sgmii_pcs_force_speed must be called later for the link
	 * to become operational.
	 */
	if (an_enabled)
		sja1105_sgmii_write(priv, port, MDIO_MMD_VEND2, MDIO_CTRL1,
				    BMCR_ANENABLE | BMCR_ANRESTART);
}

static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv,
					  int port, int speed)
{
	int pcs_speed;

	switch (speed) {
	case SPEED_1000:
		pcs_speed = BMCR_SPEED1000;
		break;
	case SPEED_100:
		pcs_speed = BMCR_SPEED100;
		break;
	case SPEED_10:
		pcs_speed = BMCR_SPEED10;
		break;
	default:
		dev_err(priv->ds->dev, "Invalid speed %d\n", speed);
		return;
	}
	sja1105_sgmii_write(priv, port, MDIO_MMD_VEND2, MDIO_CTRL1,
			    pcs_speed | BMCR_FULLDPLX);
}

/* Convert link speed from SJA1105 to ethtool encoding */
static int sja1105_port_speed_to_ethtool(struct sja1105_private *priv,
					 u64 speed)
{
	if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS])
		return SPEED_10;
	if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS])
		return SPEED_100;
	if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS])
		return SPEED_1000;
	if (speed == priv->info->port_speed[SJA1105_SPEED_2500MBPS])
		return SPEED_2500;
	return SPEED_UNKNOWN;
}

/* Set link speed in the MAC configuration for a specific port. */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
				      int speed_mbps)
{
	struct sja1105_mac_config_entry *mac;
	struct device *dev = priv->ds->dev;
	u64 speed;
	int rc;

	/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
	 * tables. On E/T, MAC reconfig tables are not readable, only writable.
	 * We have to *know* what the MAC looks like. For the sake of keeping
	 * the code common, we'll use the static configuration tables as a
	 * reasonable approximation for both E/T and P/Q/R/S.
	 */
	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	switch (speed_mbps) {
	case SPEED_UNKNOWN:
		/* PHYLINK called sja1105_mac_config() to inform us about
		 * the state->interface, but AN has not completed and the
		 * speed is not yet valid. UM10944.pdf says that setting
		 * SJA1105_SPEED_AUTO at runtime disables the port, so that is
		 * ok for power consumption in case AN will never complete -
		 * otherwise PHYLINK should come back with a new update.
		 */
		speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
		break;
	case SPEED_10:
		speed = priv->info->port_speed[SJA1105_SPEED_10MBPS];
		break;
	case SPEED_100:
		speed = priv->info->port_speed[SJA1105_SPEED_100MBPS];
		break;
	case SPEED_1000:
		speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
		break;
	default:
		dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
		return -EINVAL;
	}

	/* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
	 * table, since this will be used for the clocking setup, and we no
	 * longer need to store it in the static config (already told hardware
	 * we want auto during upload phase).
	 * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
	 * we need to configure the PCS only (if even that).
	 */
	if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII)
		mac[port].speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
	else
		mac[port].speed = speed;

	/* Write to the dynamic reconfiguration tables */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					  &mac[port], true);
	if (rc < 0) {
		dev_err(dev, "Failed to write MAC config: %d\n", rc);
		return rc;
	}

	/* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
	 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
	 * RMII no change of the clock setup is required. Actually, changing
	 * the clock setup does interrupt the clock signal for a certain time
	 * which causes trouble for all PHYs relying on this signal.
	 */
	if (!phy_interface_mode_is_rgmii(priv->phy_mode[port]))
		return 0;

	return sja1105_clocking_setup_port(priv, port);
}

/* The SJA1105 MAC programming model is through the static config (the xMII
 * Mode table cannot be dynamically reconfigured), and we have to program
 * that early (earlier than PHYLINK calls us, anyway).
 * So just error out in case the connected PHY attempts to change the initial
 * system interface MII protocol from what is defined in the DT, at least for
 * now.
 */
static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
				      phy_interface_t interface)
{
	return priv->phy_mode[port] != interface;
}

static void sja1105_mac_config(struct dsa_switch *ds, int port,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct sja1105_private *priv = ds->priv;
	bool is_sgmii;

	is_sgmii = (state->interface == PHY_INTERFACE_MODE_SGMII);

	if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
		dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
			phy_modes(state->interface));
		return;
	}

	if (phylink_autoneg_inband(mode) && !is_sgmii) {
		dev_err(ds->dev, "In-band AN not supported!\n");
		return;
	}

	if (is_sgmii)
		sja1105_sgmii_pcs_config(priv, port,
					 phylink_autoneg_inband(mode),
					 false);
}

static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
				  unsigned int mode,
				  phy_interface_t interface)
{
	sja1105_inhibit_tx(ds->priv, BIT(port), true);
}

static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
				unsigned int mode,
				phy_interface_t interface,
				struct phy_device *phydev,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct sja1105_private *priv = ds->priv;

	sja1105_adjust_port_config(priv, port, speed);

	if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII &&
	    !phylink_autoneg_inband(mode))
		sja1105_sgmii_pcs_force_speed(priv, port, speed);

	sja1105_inhibit_tx(priv, BIT(port), false);
}

static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
				     unsigned long *supported,
				     struct phylink_link_state *state)
{
	/* Construct a new mask which exhaustively contains all link features
	 * supported by the MAC, and then apply that (logical AND) to what will
	 * be sent to the PHY for "marketing".
	 */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct sja1105_private *priv = ds->priv;
	struct sja1105_xmii_params_entry *mii;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	/* include/linux/phylink.h says:
	 *     When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
	 *     expects the MAC driver to return all supported link modes.
	 */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    sja1105_phy_mode_mismatch(priv, port, state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	/* The MAC does not support pause frames, and also doesn't
	 * support half-duplex traffic modes.
	 */
	phylink_set(mask, Autoneg);
	phylink_set(mask, MII);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Full);
	phylink_set(mask, 100baseT1_Full);
	if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
	    mii->xmii_mode[port] == XMII_MODE_SGMII)
		phylink_set(mask, 1000baseT_Full);

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
				     struct phylink_link_state *state)
{
	struct sja1105_private *priv = ds->priv;
	int ais;

	/* Read the vendor-specific AUTONEG_INTR_STATUS register */
	ais = sja1105_sgmii_read(priv, port, MDIO_MMD_VEND2, SJA1105_AIS);
	if (ais < 0)
		return ais;

	switch (SJA1105_AIS_SPEED(ais)) {
	case 0:
		state->speed = SPEED_10;
		break;
	case 1:
		state->speed = SPEED_100;
		break;
	case 2:
		state->speed = SPEED_1000;
		break;
	default:
		dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
			SJA1105_AIS_SPEED(ais));
	}
	state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
	state->an_complete = SJA1105_AIS_COMPLETE(ais);
	state->link = SJA1105_AIS_LINK_STATUS(ais);

	return 0;
}

static int
sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
			      const struct sja1105_l2_lookup_entry *requested)
{
	struct sja1105_l2_lookup_entry *l2_lookup;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
	l2_lookup = table->entries;

	for (i = 0; i < table->entry_count; i++)
		if (l2_lookup[i].macaddr == requested->macaddr &&
		    l2_lookup[i].vlanid == requested->vlanid &&
		    l2_lookup[i].destports & BIT(port))
			return i;

	return -1;
}

/* We want FDB entries added statically through the bridge command to persist
 * across switch resets, which are a common thing during normal SJA1105
 * operation. So we have to back them up in the static configuration tables
 * and hence apply them on next static config upload... yay!
 */
static int
sja1105_static_fdb_change(struct sja1105_private *priv, int port,
			  const struct sja1105_l2_lookup_entry *requested,
			  bool keep)
{
	struct sja1105_l2_lookup_entry *l2_lookup;
	struct sja1105_table *table;
	int rc, match;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];

	match = sja1105_find_static_fdb_entry(priv, port, requested);
	if (match < 0) {
		/* Can't delete a missing entry. */
		if (!keep)
			return 0;

		/* No match => new entry */
		rc = sja1105_table_resize(table, table->entry_count + 1);
		if (rc)
			return rc;

		match = table->entry_count - 1;
	}

	/* Assign pointer after the resize (it may be new memory) */
	l2_lookup = table->entries;

	/* We have a match.
	 * If the job was to add this FDB entry, it's already done (mostly
	 * anyway, since the port forwarding mask may have changed, case in
	 * which we update it).
	 * Otherwise we have to delete it.
	 */
	if (keep) {
		l2_lookup[match] = *requested;
		return 0;
	}

	/* To remove, the strategy is to overwrite the element with
	 * the last one, and then reduce the array size by 1
	 */
	l2_lookup[match] = l2_lookup[table->entry_count - 1];
	return sja1105_table_resize(table, table->entry_count - 1);
}

/* First-generation switches have a 4-way set associative TCAM that
 * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
 * For the placement of a newly learnt FDB entry, the switch selects the bin
 * based on a hash function, and the way within that bin incrementally.
 */

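/* For illustration: index = bin * SJA1105ET_FDB_BIN_SIZE + way, so with a
 * bin size of 4, FDB index 38 corresponds to bin 9, way 2.
 */
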
static int sja1105et_fdb_index(int bin, int way)
{
	return bin * SJA1105ET_FDB_BIN_SIZE + way;
}

static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
					 const u8 *addr, u16 vid,
					 struct sja1105_l2_lookup_entry *match,
					 int *last_unused)
{
	int way;

	for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
		struct sja1105_l2_lookup_entry l2_lookup = {0};
		int index = sja1105et_fdb_index(bin, way);

		/* Skip unused entries, optionally marking them
		 * into the return value
		 */
		if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						index, &l2_lookup)) {
			if (last_unused)
				*last_unused = way;
			continue;
		}

		if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
		    l2_lookup.vlanid == vid) {
			if (match)
				*match = l2_lookup;
			return way;
		}
	}
	/* Return an invalid entry index if not found */
	return -1;
}

int sja1105et_fdb_add(struct dsa_switch *ds, int port,
		      const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	struct device *dev = ds->dev;
	int last_unused = -1;
	int bin, way, rc;

	bin = sja1105et_fdb_hash(priv, addr, vid);

	way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
					    &l2_lookup, &last_unused);
	if (way >= 0) {
		/* We have an FDB entry. Is our port in the destination
		 * mask? If yes, we need to do nothing. If not, we need
		 * to rewrite the entry by adding this port to it.
		 */
		if (l2_lookup.destports & BIT(port))
			return 0;
		l2_lookup.destports |= BIT(port);
	} else {
		/* We don't have an FDB entry. We construct a new one and
		 * try to find a place for it within the FDB table.
		 */
		l2_lookup.macaddr = ether_addr_to_u64(addr);
		l2_lookup.destports = BIT(port);
		l2_lookup.vlanid = vid;

		if (last_unused >= 0) {
			way = last_unused;
		} else {
			u8 rand_way;

			/* Bin is full, need to evict somebody.
			 * Choose victim at random. If you get these messages
			 * often, you may need to consider changing the
			 * distribution function:
			 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
			 */
			get_random_bytes(&rand_way, sizeof(rand_way));
			way = rand_way % SJA1105ET_FDB_BIN_SIZE;
			dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
				 bin, addr, way);
			/* Evict entry */
			sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
						     sja1105et_fdb_index(bin, way),
						     NULL, false);
		}
	}
	l2_lookup.index = sja1105et_fdb_index(bin, way);

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					  l2_lookup.index, &l2_lookup,
					  true);
	if (rc < 0)
		return rc;

	return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
}

int sja1105et_fdb_del(struct dsa_switch *ds, int port,
		      const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	int index, bin, way, rc;
	bool keep;

	bin = sja1105et_fdb_hash(priv, addr, vid);
	way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
					    &l2_lookup, NULL);
	if (way < 0)
		return 0;
	index = sja1105et_fdb_index(bin, way);

	/* We have an FDB entry. Is our port in the destination mask? If yes,
	 * we need to remove it. If the resulting port mask becomes empty, we
	 * need to completely evict the FDB entry.
	 * Otherwise we just write it back.
	 */
	l2_lookup.destports &= ~BIT(port);

	if (l2_lookup.destports)
		keep = true;
	else
		keep = false;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					  index, &l2_lookup, keep);
	if (rc < 0)
		return rc;

	return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
}

int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	int rc, i;

	/* Search for an existing entry in the FDB table */
	l2_lookup.macaddr = ether_addr_to_u64(addr);
	l2_lookup.vlanid = vid;
	l2_lookup.iotag = SJA1105_S_TAG;
	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
		l2_lookup.mask_vlanid = VLAN_VID_MASK;
		l2_lookup.mask_iotag = BIT(0);
	} else {
		l2_lookup.mask_vlanid = 0;
		l2_lookup.mask_iotag = 0;
	}
	l2_lookup.destports = BIT(port);

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					 SJA1105_SEARCH, &l2_lookup);
	if (rc == 0) {
		/* Found and this port is already in the entry's
		 * port mask => job done
		 */
		if (l2_lookup.destports & BIT(port))
			return 0;
		/* l2_lookup.index is populated by the switch in case it
		 * found something.
		 */
		l2_lookup.destports |= BIT(port);
		goto skip_finding_an_index;
	}

	/* Not found, so try to find an unused spot in the FDB.
	 * This is slightly inefficient because the strategy is knock-knock at
	 * every possible position from 0 to 1023.
	 */
	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						 i, NULL);
		if (rc < 0)
			break;
	}
	if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
		dev_err(ds->dev, "FDB is full, cannot add entry.\n");
		return -EINVAL;
	}
	l2_lookup.lockeds = true;
	l2_lookup.index = i;

skip_finding_an_index:
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					  l2_lookup.index, &l2_lookup,
					  true);
	if (rc < 0)
		return rc;

	return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
}

int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	bool keep;
	int rc;

	l2_lookup.macaddr = ether_addr_to_u64(addr);
	l2_lookup.vlanid = vid;
	l2_lookup.iotag = SJA1105_S_TAG;
	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
		l2_lookup.mask_vlanid = VLAN_VID_MASK;
		l2_lookup.mask_iotag = BIT(0);
	} else {
		l2_lookup.mask_vlanid = 0;
		l2_lookup.mask_iotag = 0;
	}
	l2_lookup.destports = BIT(port);

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					 SJA1105_SEARCH, &l2_lookup);
	if (rc < 0)
		return 0;

	l2_lookup.destports &= ~BIT(port);

	/* Decide whether we remove just this port from the FDB entry,
	 * or if we remove it completely.
	 */
	if (l2_lookup.destports)
		keep = true;
	else
		keep = false;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					  l2_lookup.index, &l2_lookup, keep);
	if (rc < 0)
		return rc;

	return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
}

static int sja1105_fdb_add(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	struct sja1105_private *priv = ds->priv;

	/* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
	 * so the switch still does some VLAN processing internally.
	 * But Shared VLAN Learning (SVL) is also active, and it will take
	 * care of autonomous forwarding between the unique pvid's of each
	 * port. Here we just make sure that users can't add duplicate FDB
	 * entries when in this mode - the actual VID doesn't matter except
	 * for what gets printed in 'bridge fdb show'. In the case of zero,
	 * no VID gets printed at all.
	 */
	if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
		vid = 0;

	return priv->info->fdb_add_cmd(ds, port, addr, vid);
}

static int sja1105_fdb_del(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	struct sja1105_private *priv = ds->priv;

	if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
		vid = 0;

	return priv->info->fdb_del_cmd(ds, port, addr, vid);
}

static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
			    dsa_fdb_dump_cb_t *cb, void *data)
{
	struct sja1105_private *priv = ds->priv;
	struct device *dev = ds->dev;
	int i;

	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
		struct sja1105_l2_lookup_entry l2_lookup = {0};
		u8 macaddr[ETH_ALEN];
		int rc;

		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						 i, &l2_lookup);
		/* No fdb entry at i, not an issue */
		if (rc == -ENOENT)
			continue;
		if (rc) {
			dev_err(dev, "Failed to dump FDB: %d\n", rc);
			return rc;
		}

		/* FDB dump callback is per port. This means we have to
		 * disregard a valid entry if it's not for this port, even if
		 * only to revisit it later. This is inefficient because the
		 * 1024-sized FDB table needs to be traversed 4 times through
		 * SPI during a 'bridge fdb show' command.
		 */
		if (!(l2_lookup.destports & BIT(port)))
			continue;

		/* We need to hide the FDB entry for unknown multicast */
		if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
		    l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
			continue;

		u64_to_ether_addr(l2_lookup.macaddr, macaddr);

		/* We need to hide the dsa_8021q VLANs from the user. */
		if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
			l2_lookup.vlanid = 0;
		cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
	}
	return 0;
}

static int sja1105_mdb_add(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_mdb *mdb)
{
	return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
}

static int sja1105_mdb_del(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_mdb *mdb)
{
	return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
}

/* Common function for unicast and broadcast flood configuration.
 * Flooding is configured between each {ingress, egress} port pair, and since
 * the bridge's semantics are those of "egress flooding", it means we must
 * enable flooding towards this port from all ingress ports that are in the
 * same forwarding domain.
 */
static int sja1105_manage_flood_domains(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_entry *l2_fwd;
	struct dsa_switch *ds = priv->ds;
	int from, to, rc;

	l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;

	for (from = 0; from < ds->num_ports; from++) {
		u64 fl_domain = 0, bc_domain = 0;

		for (to = 0; to < priv->ds->num_ports; to++) {
			if (!sja1105_can_forward(l2_fwd, from, to))
				continue;

			if (priv->ucast_egress_floods & BIT(to))
				fl_domain |= BIT(to);
			if (priv->bcast_egress_floods & BIT(to))
				bc_domain |= BIT(to);
		}

		/* Nothing changed, nothing to do */
		if (l2_fwd[from].fl_domain == fl_domain &&
		    l2_fwd[from].bc_domain == bc_domain)
			continue;

		l2_fwd[from].fl_domain = fl_domain;
		l2_fwd[from].bc_domain = bc_domain;

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
						  from, &l2_fwd[from], true);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static int sja1105_bridge_member(struct dsa_switch *ds, int port,
				 struct net_device *br, bool member)
{
	struct sja1105_l2_forwarding_entry *l2_fwd;
	struct sja1105_private *priv = ds->priv;
	int i, rc;

	l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;

	for (i = 0; i < ds->num_ports; i++) {
		/* Add this port to the forwarding matrix of the
		 * other ports in the same bridge, and vice versa.
		 */
		if (!dsa_is_user_port(ds, i))
			continue;
		/* For the ports already under the bridge, only one thing needs
		 * to be done, and that is to add this port to their
		 * reachability domain. So we can perform the SPI write for
		 * them immediately. However, for this port itself (the one
		 * that is new to the bridge), we need to add all other ports
		 * to its reachability domain. So we do that incrementally in
		 * this loop, and perform the SPI write only at the end, once
		 * the domain contains all other bridge ports.
		 */
		if (i == port)
			continue;
		if (dsa_to_port(ds, i)->bridge_dev != br)
			continue;
		sja1105_port_allow_traffic(l2_fwd, i, port, member);
		sja1105_port_allow_traffic(l2_fwd, port, i, member);

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
						  i, &l2_fwd[i], true);
		if (rc < 0)
			return rc;
	}

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
					  port, &l2_fwd[port], true);
	if (rc)
		return rc;

	return sja1105_manage_flood_domains(priv);
}

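/* Summary of the STP state to MAC config mapping programmed below:
 *
 *   state       ingress  egress  dyn_learn
 *   DISABLED    no       no      no
 *   BLOCKING    no       no      no
 *   LISTENING   yes      no      no
 *   LEARNING    yes      no      yes (*)
 *   FORWARDING  yes      yes     yes (*)
 *
 * (*) only if learning has not been administratively disabled on the port
 *     via priv->learn_ena.
 */
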
static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
					 u8 state)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_mac_config_entry *mac;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		/* From UM10944 description of DRPDTAG (why put this there?):
		 * "Management traffic flows to the port regardless of the state
		 * of the INGRESS flag". So BPDUs are still allowed to pass.
		 * At the moment no difference between DISABLED and BLOCKING.
		 */
		mac[port].ingress = false;
		mac[port].egress = false;
		mac[port].dyn_learn = false;
		break;
	case BR_STATE_LISTENING:
		mac[port].ingress = true;
		mac[port].egress = false;
		mac[port].dyn_learn = false;
		break;
	case BR_STATE_LEARNING:
		mac[port].ingress = true;
		mac[port].egress = false;
		mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
		break;
	case BR_STATE_FORWARDING:
		mac[port].ingress = true;
		mac[port].egress = true;
		mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}

	sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
				     &mac[port], true);
}

static int sja1105_bridge_join(struct dsa_switch *ds, int port,
			       struct net_device *br)
{
	return sja1105_bridge_member(ds, port, br, true);
}

static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
				 struct net_device *br)
{
	sja1105_bridge_member(ds, port, br, false);
}

#define BYTES_PER_KBIT (1000LL / 8)

static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
{
	int i;

	for (i = 0; i < priv->info->num_cbs_shapers; i++)
		if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
			return i;

	return -1;
}

static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port,
				     int prio)
{
	int i;

	for (i = 0; i < priv->info->num_cbs_shapers; i++) {
		struct sja1105_cbs_entry *cbs = &priv->cbs[i];

		if (cbs->port == port && cbs->prio == prio) {
			memset(cbs, 0, sizeof(*cbs));
			return sja1105_dynamic_config_write(priv, BLK_IDX_CBS,
							    i, cbs, true);
		}
	}

	return 0;
}

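/* Worked example for the unit conversion below: BYTES_PER_KBIT is 125, so
 * an idleslope of 20000 kbit/s requested via the tc 'cbs' qdisc is
 * programmed into hardware as 20000 * 125 = 2500000 bytes/sec.
 */
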
static int sja1105_reload_cbs(struct sja1105_private *priv)
{
	int rc = 0, i;

	for (i = 0; i < priv->info->num_cbs_shapers; i++) {
		struct sja1105_cbs_entry *cbs = &priv->cbs[i];

		if (!cbs->idle_slope && !cbs->send_slope)
			continue;

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs,
						  true);
		if (rc)
			break;
	}

	return rc;
}

static const char * const sja1105_reset_reasons[] = {
	[SJA1105_VLAN_FILTERING] = "VLAN filtering",
	[SJA1105_RX_HWTSTAMPING] = "RX timestamping",
	[SJA1105_AGEING_TIME] = "Ageing time",
	[SJA1105_SCHEDULING] = "Time-aware scheduling",
	[SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
	[SJA1105_VIRTUAL_LINKS] = "Virtual links",
};

/* For situations where we need to change a setting at runtime that is only
 * available through the static configuration, resetting the switch in order
 * to upload the new static config is unavoidable. Back up the settings we
 * modify at runtime (currently only MAC) and restore them after uploading,
 * such that this operation is relatively seamless.
 */
int sja1105_static_config_reload(struct sja1105_private *priv,
				 enum sja1105_reset_reason reason)
{
	struct ptp_system_timestamp ptp_sts_before;
	struct ptp_system_timestamp ptp_sts_after;
	int speed_mbps[SJA1105_MAX_NUM_PORTS];
	u16 bmcr[SJA1105_MAX_NUM_PORTS] = {0};
	struct sja1105_mac_config_entry *mac;
	struct dsa_switch *ds = priv->ds;
	s64 t1, t2, t3, t4;
	s64 t12, t34;
	int rc, i;
	s64 now;

	mutex_lock(&priv->mgmt_lock);

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* Back up the dynamic link speed changed by sja1105_adjust_port_config
	 * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
	 * switch wants to see in the static config in order to allow us to
	 * change it through the dynamic interface later.
	 */
	for (i = 0; i < ds->num_ports; i++) {
		speed_mbps[i] = sja1105_port_speed_to_ethtool(priv,
							      mac[i].speed);
		mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO];

		if (priv->phy_mode[i] == PHY_INTERFACE_MODE_SGMII)
			bmcr[i] = sja1105_sgmii_read(priv, i,
						     MDIO_MMD_VEND2,
						     MDIO_CTRL1);
	}

	/* No PTP operations can run right now */
	mutex_lock(&priv->ptp_data.lock);

	rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
	if (rc < 0)
		goto out_unlock_ptp;

	/* Reset switch and send updated static configuration */
	rc = sja1105_static_config_upload(priv);
	if (rc < 0)
		goto out_unlock_ptp;

	rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
	if (rc < 0)
		goto out_unlock_ptp;

	t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
	t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
	t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
	t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
	/* Mid point, corresponds to pre-reset PTPCLKVAL */
	t12 = t1 + (t2 - t1) / 2;
	/* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
	t34 = t3 + (t4 - t3) / 2;
	/* Advance PTPCLKVAL by the time it took since its readout */
	now += (t34 - t12);

	__sja1105_ptp_adjtime(ds, now);

out_unlock_ptp:
	mutex_unlock(&priv->ptp_data.lock);

	dev_info(priv->ds->dev,
		 "Reset switch and programmed static config. Reason: %s\n",
		 sja1105_reset_reasons[reason]);

	/* Configure the CGU (PLLs) for MII and RMII PHYs.
	 * For these interfaces there is no dynamic configuration
	 * needed, since PLLs have same settings at all speeds.
	 */
	rc = priv->info->clocking_setup(priv);
	if (rc < 0)
		goto out;

	for (i = 0; i < ds->num_ports; i++) {
		bool an_enabled;

		rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
		if (rc < 0)
			goto out;

		if (priv->phy_mode[i] != PHY_INTERFACE_MODE_SGMII)
			continue;

		an_enabled = !!(bmcr[i] & BMCR_ANENABLE);

		sja1105_sgmii_pcs_config(priv, i, an_enabled, false);

		if (!an_enabled) {
			int speed = SPEED_UNKNOWN;

			if (bmcr[i] & BMCR_SPEED1000)
				speed = SPEED_1000;
			else if (bmcr[i] & BMCR_SPEED100)
				speed = SPEED_100;
			else
				speed = SPEED_10;

			sja1105_sgmii_pcs_force_speed(priv, i, speed);
		}
	}

	rc = sja1105_reload_cbs(priv);
	if (rc < 0)
		goto out;
out:
	mutex_unlock(&priv->mgmt_lock);

	return rc;
}
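/* The PTP rollover above, with hypothetical timestamps: if the pre-reset
 * readout of 'now' was bracketed by system times t1 = 100 and t2 = 104, and
 * the post-reset write of PTPCLKVAL = 0 by t3 = 200 and t4 = 206, then
 *
 *   t12 = 100 + (104 - 100) / 2 = 102   (when 'now' was sampled)
 *   t34 = 200 + (206 - 200) / 2 = 203   (when the clock restarted at 0)
 *
 * so the clock is advanced by t34 - t12 = 101 time units, compensating for
 * the time spent uploading the static config.
 */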
static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
{
	struct sja1105_mac_config_entry *mac;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	mac[port].vlanid = pvid;

	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					    &mac[port], true);
}

static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
					 int tree_index, int sw_index,
					 int other_port, struct net_device *br)
{
	struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
	struct sja1105_private *other_priv = other_ds->priv;
	struct sja1105_private *priv = ds->priv;
	int port, rc;

	if (other_ds->ops != &sja1105_switch_ops)
		return 0;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_user_port(ds, port))
			continue;
		if (dsa_to_port(ds, port)->bridge_dev != br)
			continue;
		rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx,
						     port,
						     other_priv->dsa_8021q_ctx,
						     other_port);
		if (rc)
			return rc;

		rc = dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx,
						     other_port,
						     priv->dsa_8021q_ctx,
						     port);
		if (rc)
			return rc;
	}

	return 0;
}

static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
					   int tree_index, int sw_index,
					   int other_port,
					   struct net_device *br)
{
	struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
	struct sja1105_private *other_priv = other_ds->priv;
	struct sja1105_private *priv = ds->priv;
	int port;

	if (other_ds->ops != &sja1105_switch_ops)
		return;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_user_port(ds, port))
			continue;
		if (dsa_to_port(ds, port)->bridge_dev != br)
			continue;

		dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port,
						 other_priv->dsa_8021q_ctx,
						 other_port);

		dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx,
						 other_port,
						 priv->dsa_8021q_ctx, port);
	}
}

static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
{
	struct sja1105_private *priv = ds->priv;
	int rc;

	rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled);
	if (rc)
		return rc;

	dev_info(ds->dev, "%s switch tagging\n",
		 enabled ? "Enabled" : "Disabled");
	return 0;
}

static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
			 enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_SJA1105;
}

static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
{
	int subvlan;

	if (pvid)
		return 0;

	for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
		if (subvlan_map[subvlan] == VLAN_N_VID)
			return subvlan;

	return -1;
}

static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid)
{
	int subvlan;

	for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
		if (subvlan_map[subvlan] == vid)
			return subvlan;

	return -1;
}

static int sja1105_find_committed_subvlan(struct sja1105_private *priv,
					  int port, u16 vid)
{
	struct sja1105_port *sp = &priv->ports[port];

	return sja1105_find_subvlan(sp->subvlan_map, vid);
}

static void sja1105_init_subvlan_map(u16 *subvlan_map)
{
	int subvlan;

	for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
		subvlan_map[subvlan] = VLAN_N_VID;
}

static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port,
				       u16 *subvlan_map)
{
	struct sja1105_port *sp = &priv->ports[port];
	int subvlan;

	for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
		sp->subvlan_map[subvlan] = subvlan_map[subvlan];
}

static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
{
	struct sja1105_vlan_lookup_entry *vlan;
	int count, i;

	vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
	count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;

	for (i = 0; i < count; i++)
		if (vlan[i].vlanid == vid)
			return i;

	/* Return an invalid entry index if not found */
	return -1;
}
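/* Example of the sub-VLAN bookkeeping done by the helpers above (port and
 * VID values are made up): a fresh port starts with all DSA_8021Q_N_SUBVLAN
 * slots set to VLAN_N_VID (unused). The pvid always claims slot 0 via
 * sja1105_find_free_subvlan(), and e.g. bridge VLANs 100 and 200 then claim
 * the first free slots:
 *
 *   subvlan_map = { <pvid>, 100, 200, VLAN_N_VID, ... }
 *
 * so sja1105_find_subvlan(subvlan_map, 200) returns 2, and a VID that was
 * never committed returns -1.
 */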
static int
sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging,
			     int count, int from_port, u16 from_vid,
			     u16 to_vid)
{
	int i;

	for (i = 0; i < count; i++)
		if (retagging[i].ing_port == BIT(from_port) &&
		    retagging[i].vlan_ing == from_vid &&
		    retagging[i].vlan_egr == to_vid)
			return i;

	/* Return an invalid entry index if not found */
	return -1;
}

static int sja1105_commit_vlans(struct sja1105_private *priv,
				struct sja1105_vlan_lookup_entry *new_vlan,
				struct sja1105_retagging_entry *new_retagging,
				int num_retagging)
{
	struct sja1105_retagging_entry *retagging;
	struct sja1105_vlan_lookup_entry *vlan;
	struct sja1105_table *table;
	int num_vlans = 0;
	int rc, i, k = 0;

	/* VLAN table */
	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
	vlan = table->entries;

	for (i = 0; i < VLAN_N_VID; i++) {
		int match = sja1105_is_vlan_configured(priv, i);

		if (new_vlan[i].vlanid != VLAN_N_VID)
			num_vlans++;

		if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) {
			/* Was there before, no longer is. Delete */
			dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i);
			rc = sja1105_dynamic_config_write(priv,
							  BLK_IDX_VLAN_LOOKUP,
							  i, &vlan[match], false);
			if (rc < 0)
				return rc;
		} else if (new_vlan[i].vlanid != VLAN_N_VID) {
			/* Nothing changed, don't do anything */
			if (match >= 0 &&
			    vlan[match].vlanid == new_vlan[i].vlanid &&
			    vlan[match].tag_port == new_vlan[i].tag_port &&
			    vlan[match].vlan_bc == new_vlan[i].vlan_bc &&
			    vlan[match].vmemb_port == new_vlan[i].vmemb_port)
				continue;
			/* Update entry */
			dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i);
			rc = sja1105_dynamic_config_write(priv,
							  BLK_IDX_VLAN_LOOKUP,
							  i, &new_vlan[i],
							  true);
			if (rc < 0)
				return rc;
		}
	}

	if (table->entry_count)
		kfree(table->entries);

	table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = num_vlans;
	vlan = table->entries;

	for (i = 0; i < VLAN_N_VID; i++) {
		if (new_vlan[i].vlanid == VLAN_N_VID)
			continue;
		vlan[k++] = new_vlan[i];
	}

	/* VLAN Retagging Table */
	table = &priv->static_config.tables[BLK_IDX_RETAGGING];
	retagging = table->entries;

	for (i = 0; i < table->entry_count; i++) {
		rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
						  i, &retagging[i], false);
		if (rc)
			return rc;
	}

	if (table->entry_count)
		kfree(table->entries);

	table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = num_retagging;
	retagging = table->entries;

	for (i = 0; i < num_retagging; i++) {
		retagging[i] = new_retagging[i];

		/* Update entry */
		rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
						  i, &retagging[i], true);
		if (rc < 0)
			return rc;
	}

	return 0;
}

struct sja1105_crosschip_vlan {
	struct list_head list;
	u16 vid;
	bool untagged;
	int port;
	int other_port;
	struct dsa_8021q_context *other_ctx;
};

struct sja1105_crosschip_switch {
	struct list_head list;
	struct dsa_8021q_context *other_ctx;
};
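/* A retagging rule, as matched by sja1105_find_retagging_entry() above, is
 * keyed on {ingress port mask, ingress VID, egress VID}. For example (the
 * VIDs and ports are made up), retagging VID 100 received on port 2 into
 * VID 1128 towards CPU port 4 would be stored as:
 *
 *   .ing_port = BIT(2), .vlan_ing = 100,
 *   .egr_port = BIT(4), .vlan_egr = 1128,
 *
 * Note that the helper compares ing_port for strict equality with
 * BIT(from_port), so entries are only ever shared on the egress side, never
 * by combining multiple ingress ports.
 */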
static int sja1105_commit_pvid(struct sja1105_private *priv)
{
	struct sja1105_bridge_vlan *v;
	struct list_head *vlan_list;
	int rc = 0;

	if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
		vlan_list = &priv->bridge_vlans;
	else
		vlan_list = &priv->dsa_8021q_vlans;

	list_for_each_entry(v, vlan_list, list) {
		if (v->pvid) {
			rc = sja1105_pvid_apply(priv, v->port, v->vid);
			if (rc)
				break;
		}
	}

	return rc;
}

static int
sja1105_build_bridge_vlans(struct sja1105_private *priv,
			   struct sja1105_vlan_lookup_entry *new_vlan)
{
	struct sja1105_bridge_vlan *v;

	if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
		return 0;

	list_for_each_entry(v, &priv->bridge_vlans, list) {
		int match = v->vid;

		new_vlan[match].vlanid = v->vid;
		new_vlan[match].vmemb_port |= BIT(v->port);
		new_vlan[match].vlan_bc |= BIT(v->port);
		if (!v->untagged)
			new_vlan[match].tag_port |= BIT(v->port);
	}

	return 0;
}

static int
sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
			      struct sja1105_vlan_lookup_entry *new_vlan)
{
	struct sja1105_bridge_vlan *v;

	if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
		return 0;

	list_for_each_entry(v, &priv->dsa_8021q_vlans, list) {
		int match = v->vid;

		new_vlan[match].vlanid = v->vid;
		new_vlan[match].vmemb_port |= BIT(v->port);
		new_vlan[match].vlan_bc |= BIT(v->port);
		if (!v->untagged)
			new_vlan[match].tag_port |= BIT(v->port);
	}

	return 0;
}

static int sja1105_build_subvlans(struct sja1105_private *priv,
				  u16 subvlan_map[][DSA_8021Q_N_SUBVLAN],
				  struct sja1105_vlan_lookup_entry *new_vlan,
				  struct sja1105_retagging_entry *new_retagging,
				  int *num_retagging)
{
	struct sja1105_bridge_vlan *v;
	int k = *num_retagging;

	if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
		return 0;

	list_for_each_entry(v, &priv->bridge_vlans, list) {
		int upstream = dsa_upstream_port(priv->ds, v->port);
		int match, subvlan;
		u16 rx_vid;

		/* Only sub-VLANs on user ports need to be applied.
		 * Bridge VLANs also include VLANs added automatically
		 * by DSA on the CPU port.
		 */
		if (!dsa_is_user_port(priv->ds, v->port))
			continue;

		subvlan = sja1105_find_subvlan(subvlan_map[v->port],
					       v->vid);
		if (subvlan < 0) {
			subvlan = sja1105_find_free_subvlan(subvlan_map[v->port],
							    v->pvid);
			if (subvlan < 0) {
				dev_err(priv->ds->dev, "No more free subvlans\n");
				return -ENOSPC;
			}
		}

		rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan);

		/* @v->vid on @v->port needs to be retagged to @rx_vid
		 * on @upstream. Assume @v->vid on @v->port and on
		 * @upstream was already configured by the previous
		 * iteration over bridge_vlans.
		 */
		match = rx_vid;
		new_vlan[match].vlanid = rx_vid;
		new_vlan[match].vmemb_port |= BIT(v->port);
		new_vlan[match].vmemb_port |= BIT(upstream);
		new_vlan[match].vlan_bc |= BIT(v->port);
		new_vlan[match].vlan_bc |= BIT(upstream);
		/* The "untagged" flag is set the same as for the
		 * original VLAN
		 */
		if (!v->untagged)
			new_vlan[match].tag_port |= BIT(v->port);
		/* But it's always tagged towards the CPU */
		new_vlan[match].tag_port |= BIT(upstream);

		/* The Retagging Table generates packet *clones* with
		 * the new VLAN. This is a very odd hardware quirk
		 * which we need to suppress by dropping the original
		 * packet.
		 * Deny egress of the original VLAN towards the CPU
		 * port. This will force the switch to drop it, and
		 * we'll see only the retagged packets.
		 */
		match = v->vid;
		new_vlan[match].vlan_bc &= ~BIT(upstream);

		/* And the retagging itself */
		if (k == SJA1105_MAX_RETAGGING_COUNT) {
			dev_err(priv->ds->dev, "No more retagging rules\n");
			return -ENOSPC;
		}
		new_retagging[k].vlan_ing = v->vid;
		new_retagging[k].vlan_egr = rx_vid;
		new_retagging[k].ing_port = BIT(v->port);
		new_retagging[k].egr_port = BIT(upstream);
		k++;

		subvlan_map[v->port][subvlan] = v->vid;
	}

	*num_retagging = k;

	return 0;
}
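/* End-to-end sketch of what sja1105_build_subvlans() sets up, with made-up
 * numbers: for tagged bridge VLAN 100 on user port 2, upstream (CPU) port 4
 * and a computed rx_vid of, say, 1128:
 *
 *   - VLAN 1128 is installed on ports 2 and 4, tagged towards both;
 *   - egress of the original VLAN 100 towards port 4 is denied, so the
 *     original frame is dropped and only its retagged clone survives;
 *   - the rule { .ing_port = BIT(2), .vlan_ing = 100,
 *                .egr_port = BIT(4), .vlan_egr = 1128 } creates that clone.
 *
 * The CPU can then recover both the source port and the original VID from
 * the 1128 tag, via the dsa_8021q encoding and the committed subvlan_map.
 */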
/* Sadly, in crosschip scenarios where the CPU port is also the link to another
 * switch, we should retag backwards (the dsa_8021q vid to the original vid) on
 * the CPU port of neighbour switches.
 */
static int
sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
				 struct sja1105_vlan_lookup_entry *new_vlan,
				 struct sja1105_retagging_entry *new_retagging,
				 int *num_retagging)
{
	struct sja1105_crosschip_vlan *tmp, *pos;
	struct dsa_8021q_crosschip_link *c;
	struct sja1105_bridge_vlan *v, *w;
	struct list_head crosschip_vlans;
	int k = *num_retagging;
	int rc = 0;

	if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
		return 0;

	INIT_LIST_HEAD(&crosschip_vlans);

	list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
		struct sja1105_private *other_priv = c->other_ctx->ds->priv;

		if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
			continue;

		/* Crosschip links are also added to the CPU ports.
		 * Ignore those.
		 */
		if (!dsa_is_user_port(priv->ds, c->port))
			continue;
		if (!dsa_is_user_port(c->other_ctx->ds, c->other_port))
			continue;

		/* Search for VLANs on the remote port */
		list_for_each_entry(v, &other_priv->bridge_vlans, list) {
			bool already_added = false;
			bool we_have_it = false;

			if (v->port != c->other_port)
				continue;

			/* If @v is a pvid on @other_ds, it does not need
			 * re-retagging, because its SVL field is 0 and we
			 * already allow that, via the dsa_8021q crosschip
			 * links.
			 */
			if (v->pvid)
				continue;

			/* Search for the VLAN on our local port */
			list_for_each_entry(w, &priv->bridge_vlans, list) {
				if (w->port == c->port && w->vid == v->vid) {
					we_have_it = true;
					break;
				}
			}

			if (!we_have_it)
				continue;

			list_for_each_entry(tmp, &crosschip_vlans, list) {
				if (tmp->vid == v->vid &&
				    tmp->untagged == v->untagged &&
				    tmp->port == c->port &&
				    tmp->other_port == v->port &&
				    tmp->other_ctx == c->other_ctx) {
					already_added = true;
					break;
				}
			}

			if (already_added)
				continue;

			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
			if (!tmp) {
				dev_err(priv->ds->dev, "Failed to allocate memory\n");
				rc = -ENOMEM;
				goto out;
			}
			tmp->vid = v->vid;
			tmp->port = c->port;
			tmp->other_port = v->port;
			tmp->other_ctx = c->other_ctx;
			tmp->untagged = v->untagged;
			list_add(&tmp->list, &crosschip_vlans);
		}
	}

	list_for_each_entry(tmp, &crosschip_vlans, list) {
		struct sja1105_private *other_priv = tmp->other_ctx->ds->priv;
		int upstream = dsa_upstream_port(priv->ds, tmp->port);
		int match, subvlan;
		u16 rx_vid;

		subvlan = sja1105_find_committed_subvlan(other_priv,
							 tmp->other_port,
							 tmp->vid);
		/* If this happens, it's a bug. The neighbour switch does not
		 * have a subvlan for tmp->vid on tmp->other_port, but it
		 * should, since we already checked for its vlan_state.
		 */
		if (WARN_ON(subvlan < 0)) {
			rc = -EINVAL;
			goto out;
		}

		rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds,
						  tmp->other_port,
						  subvlan);

		/* The @rx_vid retagged from @tmp->vid on
		 * {@tmp->other_ds, @tmp->other_port} needs to be
		 * re-retagged to @tmp->vid on the way back to us.
		 *
		 * Assume the original @tmp->vid is already configured
		 * on this local switch, otherwise we wouldn't be
		 * retagging its subvlan on the other switch in the
		 * first place. We just need to add a reverse retagging
		 * rule for @rx_vid and install @rx_vid on our ports.
		 */
		match = rx_vid;
		new_vlan[match].vlanid = rx_vid;
		new_vlan[match].vmemb_port |= BIT(tmp->port);
		new_vlan[match].vmemb_port |= BIT(upstream);
		/* The "untagged" flag is set the same as for the
		 * original VLAN. And towards the CPU, it doesn't
		 * really matter, because @rx_vid will only receive
		 * traffic on that port. For consistency with other dsa_8021q
		 * VLANs, we'll keep the CPU port tagged.
		 */
		if (!tmp->untagged)
			new_vlan[match].tag_port |= BIT(tmp->port);
		new_vlan[match].tag_port |= BIT(upstream);
		/* Deny egress of @rx_vid towards our front-panel port.
		 * This will force the switch to drop it, and we'll see
		 * only the re-retagged packets (having the original,
		 * pre-initial-retagging, VLAN @tmp->vid).
		 */
		new_vlan[match].vlan_bc &= ~BIT(tmp->port);

		/* On reverse retagging, the same ingress VLAN goes to multiple
		 * ports. So we have an opportunity to create composite rules
		 * to not waste the limited space in the retagging table.
		 */
		k = sja1105_find_retagging_entry(new_retagging, *num_retagging,
						 upstream, rx_vid, tmp->vid);
		if (k < 0) {
			if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) {
				dev_err(priv->ds->dev, "No more retagging rules\n");
				rc = -ENOSPC;
				goto out;
			}
			k = (*num_retagging)++;
		}
		/* And the retagging itself */
		new_retagging[k].vlan_ing = rx_vid;
		new_retagging[k].vlan_egr = tmp->vid;
		new_retagging[k].ing_port = BIT(upstream);
		new_retagging[k].egr_port |= BIT(tmp->port);
	}

out:
	list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) {
		list_del(&tmp->list);
		kfree(tmp);
	}

	return rc;
}
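/* Reverse-retagging sketch (made-up numbers): if a neighbour switch retagged
 * its VID 100 into rx_vid 1128 towards its CPU link, then on our switch a
 * single rule { .ing_port = BIT(upstream), .vlan_ing = 1128,
 * .vlan_egr = 100 } restores the original VID, and each additional local
 * port carrying VLAN 100 only ORs another bit into egr_port of that same
 * entry instead of consuming another one of the
 * SJA1105_MAX_RETAGGING_COUNT slots.
 */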
static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify);

static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
{
	struct sja1105_crosschip_switch *s, *pos;
	struct list_head crosschip_switches;
	struct dsa_8021q_crosschip_link *c;
	int rc = 0;

	INIT_LIST_HEAD(&crosschip_switches);

	list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
		bool already_added = false;

		list_for_each_entry(s, &crosschip_switches, list) {
			if (s->other_ctx == c->other_ctx) {
				already_added = true;
				break;
			}
		}

		if (already_added)
			continue;

		s = kzalloc(sizeof(*s), GFP_KERNEL);
		if (!s) {
			dev_err(priv->ds->dev, "Failed to allocate memory\n");
			rc = -ENOMEM;
			goto out;
		}
		s->other_ctx = c->other_ctx;
		list_add(&s->list, &crosschip_switches);
	}

	list_for_each_entry(s, &crosschip_switches, list) {
		struct sja1105_private *other_priv = s->other_ctx->ds->priv;

		rc = sja1105_build_vlan_table(other_priv, false);
		if (rc)
			goto out;
	}

out:
	list_for_each_entry_safe(s, pos, &crosschip_switches, list) {
		list_del(&s->list);
		kfree(s);
	}

	return rc;
}
static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify)
{
	u16 subvlan_map[SJA1105_MAX_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
	struct sja1105_retagging_entry *new_retagging;
	struct sja1105_vlan_lookup_entry *new_vlan;
	struct sja1105_table *table;
	int i, num_retagging = 0;
	int rc;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
	new_vlan = kcalloc(VLAN_N_VID,
			   table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!new_vlan)
		return -ENOMEM;

	table = &priv->static_config.tables[BLK_IDX_RETAGGING];
	new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT,
				table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!new_retagging) {
		kfree(new_vlan);
		return -ENOMEM;
	}

	for (i = 0; i < VLAN_N_VID; i++)
		new_vlan[i].vlanid = VLAN_N_VID;

	for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++)
		new_retagging[i].vlan_ing = VLAN_N_VID;

	for (i = 0; i < priv->ds->num_ports; i++)
		sja1105_init_subvlan_map(subvlan_map[i]);

	/* Bridge VLANs */
	rc = sja1105_build_bridge_vlans(priv, new_vlan);
	if (rc)
		goto out;

	/* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c:
	 * - RX VLANs
	 * - TX VLANs
	 * - Crosschip links
	 */
	rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan);
	if (rc)
		goto out;

	/* Private VLANs necessary for dsa_8021q operation, which we need to
	 * determine on our own:
	 * - Sub-VLANs
	 * - Sub-VLANs of crosschip switches
	 */
	rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging,
				    &num_retagging);
	if (rc)
		goto out;

	rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging,
					      &num_retagging);
	if (rc)
		goto out;

	rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging);
	if (rc)
		goto out;

	rc = sja1105_commit_pvid(priv);
	if (rc)
		goto out;

	for (i = 0; i < priv->ds->num_ports; i++)
		sja1105_commit_subvlan_map(priv, i, subvlan_map[i]);

	if (notify) {
		rc = sja1105_notify_crosschip_switches(priv);
		if (rc)
			goto out;
	}

out:
	kfree(new_vlan);
	kfree(new_retagging);

	return rc;
}
/* The TPID setting belongs to the General Parameters table, which can only
 * be partially reconfigured at runtime (and the TPID is not part of the
 * runtime-writable subset). So a switch reset is required.
 */
int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
			   struct netlink_ext_ack *extack)
{
	struct sja1105_l2_lookup_params_entry *l2_lookup_params;
	struct sja1105_general_params_entry *general_params;
	struct sja1105_private *priv = ds->priv;
	enum sja1105_vlan_state state;
	struct sja1105_table *table;
	struct sja1105_rule *rule;
	bool want_tagging;
	u16 tpid, tpid2;
	int rc;

	list_for_each_entry(rule, &priv->flow_block.rules, list) {
		if (rule->type == SJA1105_RULE_VL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Cannot change VLAN filtering with active VL rules");
			return -EBUSY;
		}
	}

	if (enabled) {
		/* Enable VLAN filtering. */
		tpid = ETH_P_8021Q;
		tpid2 = ETH_P_8021AD;
	} else {
		/* Disable VLAN filtering. */
		tpid = ETH_P_SJA1105;
		tpid2 = ETH_P_SJA1105;
	}

	for (port = 0; port < ds->num_ports; port++) {
		struct sja1105_port *sp = &priv->ports[port];

		if (enabled)
			sp->xmit_tpid = priv->info->qinq_tpid;
		else
			sp->xmit_tpid = ETH_P_SJA1105;
	}

	if (!enabled)
		state = SJA1105_VLAN_UNAWARE;
	else if (priv->best_effort_vlan_filtering)
		state = SJA1105_VLAN_BEST_EFFORT;
	else
		state = SJA1105_VLAN_FILTERING_FULL;

	if (priv->vlan_state == state)
		return 0;

	priv->vlan_state = state;
	want_tagging = (state == SJA1105_VLAN_UNAWARE ||
			state == SJA1105_VLAN_BEST_EFFORT);

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
	general_params = table->entries;
	/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
	general_params->tpid = tpid;
	/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
	general_params->tpid2 = tpid2;
	/* When VLAN filtering is on, we need to at least be able to
	 * decode management traffic through the "backup plan".
	 */
	general_params->incl_srcpt1 = enabled;
	general_params->incl_srcpt0 = enabled;

	want_tagging = priv->best_effort_vlan_filtering || !enabled;

	/* VLAN filtering => independent VLAN learning.
	 * No VLAN filtering (or best effort) => shared VLAN learning.
	 *
	 * In shared VLAN learning mode, untagged traffic still gets
	 * pvid-tagged, and the FDB table gets populated with entries
	 * containing the "real" (pvid or from VLAN tag) VLAN ID.
	 * However the switch performs a masked L2 lookup in the FDB,
	 * effectively only looking up a frame's DMAC (and not VID) for the
	 * forwarding decision.
	 *
	 * This is extremely convenient for us, because in modes with
	 * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
	 * each front panel port. This is good for identification but breaks
	 * learning badly - the VID of the learnt FDB entry is unique, aka
	 * no frames coming from any other port are going to have it. So
	 * for forwarding purposes, this is as though learning was broken
	 * (all frames get flooded).
	 */
	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
	l2_lookup_params = table->entries;
	l2_lookup_params->shared_learn = want_tagging;

	sja1105_frame_memory_partitioning(priv);

	rc = sja1105_build_vlan_table(priv, false);
	if (rc)
		return rc;

	rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
	if (rc)
		NL_SET_ERR_MSG_MOD(extack, "Failed to change VLAN Ethertype");

	/* Switch port identification based on 802.1Q is only possible
	 * if we are not under a vlan_filtering bridge. So make sure
	 * the two configurations are mutually exclusive (of course, the
	 * user may know better, i.e. best_effort_vlan_filtering).
	 */
	return sja1105_setup_8021q_tagging(ds, want_tagging);
}
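/* Concretely (EtherType values taken from the 802.1Q/802.1ad definitions):
 * with vlan_filtering=1 the switch parses tpid = 0x8100 (C-tag) and
 * tpid2 = 0x88A8 (S-tag) as VLAN headers, so bridge VLANs are enforced.
 * With vlan_filtering=0 both TPIDs are set to ETH_P_SJA1105, which no
 * regular traffic carries, so every frame appears untagged to the switch
 * and is classified to the port pvid instead.
 */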
/* Returns number of VLANs added (0 or 1) on success,
 * or a negative error code.
 */
static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
				u16 flags, struct list_head *vlan_list)
{
	bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
	struct sja1105_bridge_vlan *v;

	list_for_each_entry(v, vlan_list, list) {
		if (v->port == port && v->vid == vid) {
			/* Already added */
			if (v->untagged == untagged && v->pvid == pvid)
				/* Nothing changed */
				return 0;

			/* It's the same VLAN, but some of the flags changed
			 * and the user did not bother to delete it first.
			 * Update it and trigger sja1105_build_vlan_table.
			 */
			v->untagged = untagged;
			v->pvid = pvid;
			return 1;
		}
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		dev_err(ds->dev, "Out of memory while storing VLAN\n");
		return -ENOMEM;
	}

	v->port = port;
	v->vid = vid;
	v->untagged = untagged;
	v->pvid = pvid;
	list_add(&v->list, vlan_list);

	return 1;
}

/* Returns number of VLANs deleted (0 or 1) */
static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
				struct list_head *vlan_list)
{
	struct sja1105_bridge_vlan *v, *n;

	list_for_each_entry_safe(v, n, vlan_list, list) {
		if (v->port == port && v->vid == vid) {
			list_del(&v->list);
			kfree(v);
			return 1;
		}
	}

	return 0;
}
static int sja1105_vlan_add(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_vlan *vlan,
			    struct netlink_ext_ack *extack)
{
	struct sja1105_private *priv = ds->priv;
	bool vlan_table_changed = false;
	int rc;

	/* If the user wants best-effort VLAN filtering (aka vlan_filtering
	 * bridge plus tagging), be sure to at least deny alterations to the
	 * configuration done by dsa_8021q.
	 */
	if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL &&
	    vid_is_dsa_8021q(vlan->vid)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Range 1024-3071 reserved for dsa_8021q operation");
		return -EBUSY;
	}

	rc = sja1105_vlan_add_one(ds, port, vlan->vid, vlan->flags,
				  &priv->bridge_vlans);
	if (rc < 0)
		return rc;
	if (rc > 0)
		vlan_table_changed = true;

	if (!vlan_table_changed)
		return 0;

	return sja1105_build_vlan_table(priv, true);
}

static int sja1105_vlan_del(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_vlan *vlan)
{
	struct sja1105_private *priv = ds->priv;
	bool vlan_table_changed = false;
	int rc;

	rc = sja1105_vlan_del_one(ds, port, vlan->vid, &priv->bridge_vlans);
	if (rc > 0)
		vlan_table_changed = true;

	if (!vlan_table_changed)
		return 0;

	return sja1105_build_vlan_table(priv, true);
}

static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
				      u16 flags)
{
	struct sja1105_private *priv = ds->priv;
	int rc;

	rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans);
	if (rc <= 0)
		return rc;

	return sja1105_build_vlan_table(priv, true);
}

static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
	struct sja1105_private *priv = ds->priv;
	int rc;

	rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans);
	if (!rc)
		return 0;

	return sja1105_build_vlan_table(priv, true);
}

static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = {
	.vlan_add	= sja1105_dsa_8021q_vlan_add,
	.vlan_del	= sja1105_dsa_8021q_vlan_del,
};

/* The programming model for the SJA1105 switch is "all-at-once" via static
 * configuration tables. Some of these can be dynamically modified at runtime,
 * but not the xMII mode parameters table.
 * Furthermore, some PHYs may not have crystals for generating their clocks
 * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
 * ref_clk pin. So port clocking needs to be initialized early, before
 * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
 * Setting the correct PHY link speed does not matter now.
 * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
 * bindings are not yet parsed by DSA core. We need to parse early so that we
 * can populate the xMII mode parameters table.
 */
static int sja1105_setup(struct dsa_switch *ds)
{
	struct sja1105_dt_port ports[SJA1105_MAX_NUM_PORTS];
	struct sja1105_private *priv = ds->priv;
	int rc;

	rc = sja1105_parse_dt(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
		return rc;
	}

	/* Error out early if internal delays are required through DT
	 * and we can't apply them.
	 */
	rc = sja1105_parse_rgmii_delays(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "RGMII delay not supported\n");
		return rc;
	}

	rc = sja1105_ptp_clock_register(ds);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
		return rc;
	}
	/* Create and send configuration down to device */
	rc = sja1105_static_config_load(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to load static config: %d\n", rc);
		goto out_ptp_clock_unregister;
	}
	/* Configure the CGU (PHY link modes and speeds) */
	rc = priv->info->clocking_setup(priv);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
		goto out_static_config_free;
	}
	/* On SJA1105, VLAN filtering per se is always enabled in hardware.
	 * The only thing we can do to disable it is lie about what the 802.1Q
	 * EtherType is.
	 * So it will still try to apply VLAN filtering, but all ingress
	 * traffic (except frames received with EtherType of ETH_P_SJA1105)
	 * will be internally tagged with a distorted VLAN header where the
	 * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
	 */
	ds->vlan_filtering_is_global = true;

	/* Advertise the 8 egress queues */
	ds->num_tx_queues = SJA1105_NUM_TC;

	ds->mtu_enforcement_ingress = true;

	priv->best_effort_vlan_filtering = true;

	rc = sja1105_devlink_setup(ds);
	if (rc < 0)
		goto out_static_config_free;

	/* The DSA/switchdev model brings up switch ports in standalone mode by
	 * default, and that means vlan_filtering is 0 since they're not under
	 * a bridge, so it's safe to set up switch tagging at this time.
	 */
	rtnl_lock();
	rc = sja1105_setup_8021q_tagging(ds, true);
	rtnl_unlock();
	if (rc)
		goto out_devlink_teardown;

	return 0;

out_devlink_teardown:
	sja1105_devlink_teardown(ds);
out_ptp_clock_unregister:
	sja1105_ptp_clock_unregister(ds);
out_static_config_free:
	sja1105_static_config_free(&priv->static_config);

	return rc;
}

static void sja1105_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_bridge_vlan *v, *n;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		struct sja1105_port *sp = &priv->ports[port];

		if (!dsa_is_user_port(ds, port))
			continue;

		if (sp->xmit_worker)
			kthread_destroy_worker(sp->xmit_worker);
	}

	sja1105_devlink_teardown(ds);
	sja1105_flower_teardown(ds);
	sja1105_tas_teardown(ds);
	sja1105_ptp_clock_unregister(ds);
	sja1105_static_config_free(&priv->static_config);

	list_for_each_entry_safe(v, n, &priv->dsa_8021q_vlans, list) {
		list_del(&v->list);
		kfree(v);
	}

	list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) {
		list_del(&v->list);
		kfree(v);
	}
}
static void sja1105_port_disable(struct dsa_switch *ds, int port)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_port *sp = &priv->ports[port];

	if (!dsa_is_user_port(ds, port))
		return;

	kthread_cancel_work_sync(&sp->xmit_work);
	skb_queue_purge(&sp->xmit_queue);
}

static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
			     struct sk_buff *skb, bool takets)
{
	struct sja1105_mgmt_entry mgmt_route = {0};
	struct sja1105_private *priv = ds->priv;
	struct ethhdr *hdr;
	int timeout = 10;
	int rc;

	hdr = eth_hdr(skb);

	mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
	mgmt_route.destports = BIT(port);
	mgmt_route.enfport = 1;
	mgmt_route.tsreg = 0;
	mgmt_route.takets = takets;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					  slot, &mgmt_route, true);
	if (rc < 0) {
		kfree_skb(skb);
		return rc;
	}

	/* Transfer skb to the host port. */
	dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);

	/* Wait until the switch has processed the frame */
	do {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
						 slot, &mgmt_route);
		if (rc < 0) {
			dev_err_ratelimited(priv->ds->dev,
					    "failed to poll for mgmt route\n");
			continue;
		}

		/* UM10944: The ENFPORT flag of the respective entry is
		 * cleared when a match is found. The host can use this
		 * flag as an acknowledgment.
		 */
		cpu_relax();
	} while (mgmt_route.enfport && --timeout);

	if (!timeout) {
		/* Clean up the management route so that a follow-up
		 * frame may not match on it by mistake.
		 * This is only hardware supported on P/Q/R/S - on E/T it is
		 * a no-op and we are silently discarding the -EOPNOTSUPP.
		 */
		sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					     slot, &mgmt_route, false);
		dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
	}

	return NETDEV_TX_OK;
}

#define work_to_port(work) \
	container_of((work), struct sja1105_port, xmit_work)
#define tagger_to_sja1105(t) \
	container_of((t), struct sja1105_private, tagger_data)

/* Deferred work is unfortunately necessary because setting up the management
 * route cannot be done from atomic context (the SPI transfer takes a
 * sleepable lock on the bus).
 */
static void sja1105_port_deferred_xmit(struct kthread_work *work)
{
	struct sja1105_port *sp = work_to_port(work);
	struct sja1105_tagger_data *tagger_data = sp->data;
	struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
	int port = sp - priv->ports;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
		struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;

		mutex_lock(&priv->mgmt_lock);

		sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);

		/* The clone, if there, was made by dsa_skb_tx_timestamp */
		if (clone)
			sja1105_ptp_txtstamp_skb(priv->ds, port, clone);

		mutex_unlock(&priv->mgmt_lock);
	}
}
/* The MAXAGE setting belongs to the L2 Lookup Parameters table,
 * which cannot be reconfigured at runtime. So a switch reset is required.
 */
static int sja1105_set_ageing_time(struct dsa_switch *ds,
				   unsigned int ageing_time)
{
	struct sja1105_l2_lookup_params_entry *l2_lookup_params;
	struct sja1105_private *priv = ds->priv;
	struct sja1105_table *table;
	unsigned int maxage;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
	l2_lookup_params = table->entries;

	maxage = SJA1105_AGEING_TIME_MS(ageing_time);

	if (l2_lookup_params->maxage == maxage)
		return 0;

	l2_lookup_params->maxage = maxage;

	return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
}

static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct sja1105_l2_policing_entry *policing;
	struct sja1105_private *priv = ds->priv;

	new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;

	if (dsa_is_cpu_port(ds, port))
		new_mtu += VLAN_HLEN;

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	if (policing[port].maxlen == new_mtu)
		return 0;

	policing[port].maxlen = new_mtu;

	return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}

static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
{
	return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
}

static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
				 enum tc_setup_type type,
				 void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_TAPRIO:
		return sja1105_setup_tc_taprio(ds, port, type_data);
	case TC_SETUP_QDISC_CBS:
		return sja1105_setup_tc_cbs(ds, port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
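/* Unit sketches for the two conversions above (assuming, per sja1105.h, that
 * SJA1105_AGEING_TIME_MS() converts milliseconds into the hardware's 10 ms
 * ticks): the default 300000 ms (5 min) bridge ageing time would give
 * MAXAGE = 30000. For the MTU, a standard 1500-byte MTU is policed as
 * 1500 + VLAN_ETH_HLEN (18) + ETH_FCS_LEN (4) = 1522 bytes on the wire,
 * plus VLAN_HLEN (4) more on the CPU port to make room for the DSA tag;
 * conversely, the 2043-byte hardware frame length limit maps back to a
 * maximum MTU of 2043 - 18 - 4 = 2021.
 */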
/* We have a single mirror (@to) port, but can configure ingress and egress
 * mirroring on all other (@from) ports.
 * We need to allow mirroring rules only as long as the @to port is always the
 * same, and we need to unset the @to port from mirr_port only when there is no
 * mirroring rule that references it.
 */
static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
				bool ingress, bool enabled)
{
	struct sja1105_general_params_entry *general_params;
	struct sja1105_mac_config_entry *mac;
	struct dsa_switch *ds = priv->ds;
	struct sja1105_table *table;
	bool already_enabled;
	u64 new_mirr_port;
	int rc;

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
	general_params = table->entries;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	already_enabled = (general_params->mirr_port != ds->num_ports);
	if (already_enabled && enabled && general_params->mirr_port != to) {
		dev_err(priv->ds->dev,
			"Delete mirroring rules towards port %llu first\n",
			general_params->mirr_port);
		return -EBUSY;
	}

	new_mirr_port = to;
	if (!enabled) {
		bool keep = false;
		int port;

		/* Anybody still referencing mirr_port? */
		for (port = 0; port < ds->num_ports; port++) {
			if (mac[port].ing_mirr || mac[port].egr_mirr) {
				keep = true;
				break;
			}
		}
		/* Unset already_enabled for next time */
		if (!keep)
			new_mirr_port = ds->num_ports;
	}
	if (new_mirr_port != general_params->mirr_port) {
		general_params->mirr_port = new_mirr_port;

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
						  0, general_params, true);
		if (rc < 0)
			return rc;
	}

	if (ingress)
		mac[from].ing_mirr = enabled;
	else
		mac[from].egr_mirr = enabled;

	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
					    &mac[from], true);
}

static int sja1105_mirror_add(struct dsa_switch *ds, int port,
			      struct dsa_mall_mirror_tc_entry *mirror,
			      bool ingress)
{
	return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
				    ingress, true);
}

static void sja1105_mirror_del(struct dsa_switch *ds, int port,
			       struct dsa_mall_mirror_tc_entry *mirror)
{
	sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
			     mirror->ingress, false);
}

static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
				    struct dsa_mall_policer_tc_entry *policer)
{
	struct sja1105_l2_policing_entry *policing;
	struct sja1105_private *priv = ds->priv;

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	/* In hardware, every 8 microseconds the credit level is incremented by
	 * the value of RATE bytes divided by 64, up to a maximum of SMAX
	 * bytes.
	 */
	policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
				      1000000);
	policing[port].smax = policer->burst;

	return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}

static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
{
	struct sja1105_l2_policing_entry *policing;
	struct sja1105_private *priv = ds->priv;

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	policing[port].rate = SJA1105_RATE_MBPS(1000);
	policing[port].smax = 65535;

	sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}
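/* Derivation of the RATE formula above: the credit counter gains RATE/64
 * bytes every 8 us, and there are 125000 such intervals per second, so the
 * sustained rate is (RATE / 64) * 125000 = RATE * 1953.125 bytes/s. Hence
 * RATE = rate_bytes_per_sec / 1953.125, which is exactly
 * rate_bytes_per_sec * 512 / 1000000. For example, a 125,000,000 bytes/s
 * (1 Gbps) policer programs RATE = 64000.
 */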
static int sja1105_port_set_learning(struct sja1105_private *priv, int port,
				     bool enabled)
{
	struct sja1105_mac_config_entry *mac;
	int rc;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	mac[port].dyn_learn = enabled;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					  &mac[port], true);
	if (rc)
		return rc;

	if (enabled)
		priv->learn_ena |= BIT(port);
	else
		priv->learn_ena &= ~BIT(port);

	return 0;
}

static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to,
					  struct switchdev_brport_flags flags)
{
	if (flags.mask & BR_FLOOD) {
		if (flags.val & BR_FLOOD)
			priv->ucast_egress_floods |= BIT(to);
		else
			priv->ucast_egress_floods &= ~BIT(to);
	}

	if (flags.mask & BR_BCAST_FLOOD) {
		if (flags.val & BR_BCAST_FLOOD)
			priv->bcast_egress_floods |= BIT(to);
		else
			priv->bcast_egress_floods &= ~BIT(to);
	}

	return sja1105_manage_flood_domains(priv);
}

static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
				    struct switchdev_brport_flags flags,
				    struct netlink_ext_ack *extack)
{
	struct sja1105_l2_lookup_entry *l2_lookup;
	struct sja1105_table *table;
	int match;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
	l2_lookup = table->entries;

	for (match = 0; match < table->entry_count; match++)
		if (l2_lookup[match].macaddr == SJA1105_UNKNOWN_MULTICAST &&
		    l2_lookup[match].mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
			break;

	if (match == table->entry_count) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Could not find FDB entry for unknown multicast");
		return -ENOSPC;
	}

	if (flags.val & BR_MCAST_FLOOD)
		l2_lookup[match].destports |= BIT(to);
	else
		l2_lookup[match].destports &= ~BIT(to);

	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					    l2_lookup[match].index,
					    &l2_lookup[match],
					    true);
}
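/* How the SJA1105_UNKNOWN_MULTICAST match above works: 0x010000000000 is the
 * destination MAC 01:00:00:00:00:00 as a u64, i.e. just the multicast (I/G)
 * address bit. With both macaddr and mask_macaddr set to that value, the
 * masked FDB lookup matches every multicast DMAC that no more specific
 * entry claims, and its destports mask is what BR_MCAST_FLOOD toggles per
 * port.
 */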
static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
					 struct switchdev_brport_flags flags,
					 struct netlink_ext_ack *extack)
{
	struct sja1105_private *priv = ds->priv;

	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
			   BR_BCAST_FLOOD))
		return -EINVAL;

	if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD) &&
	    !priv->info->can_limit_mcast_flood) {
		bool multicast = !!(flags.val & BR_MCAST_FLOOD);
		bool unicast = !!(flags.val & BR_FLOOD);

		if (unicast != multicast) {
			NL_SET_ERR_MSG_MOD(extack,
					   "This chip cannot configure multicast flooding independently of unicast");
			return -EINVAL;
		}
	}

	return 0;
}

static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
				     struct switchdev_brport_flags flags,
				     struct netlink_ext_ack *extack)
{
	struct sja1105_private *priv = ds->priv;
	int rc;

	if (flags.mask & BR_LEARNING) {
		bool learn_ena = !!(flags.val & BR_LEARNING);

		rc = sja1105_port_set_learning(priv, port, learn_ena);
		if (rc)
			return rc;
	}

	if (flags.mask & (BR_FLOOD | BR_BCAST_FLOOD)) {
		rc = sja1105_port_ucast_bcast_flood(priv, port, flags);
		if (rc)
			return rc;
	}

	/* For chips that can't offload BR_MCAST_FLOOD independently, there
	 * is nothing to do here, we ensured the configuration is in sync by
	 * offloading BR_FLOOD.
	 */
	if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) {
		rc = sja1105_port_mcast_flood(priv, port, flags,
					      extack);
		if (rc)
			return rc;
	}

	return 0;
}

static const struct dsa_switch_ops sja1105_switch_ops = {
	.get_tag_protocol	= sja1105_get_tag_protocol,
	.setup			= sja1105_setup,
	.teardown		= sja1105_teardown,
	.set_ageing_time	= sja1105_set_ageing_time,
	.port_change_mtu	= sja1105_change_mtu,
	.port_max_mtu		= sja1105_get_max_mtu,
	.phylink_validate	= sja1105_phylink_validate,
	.phylink_mac_link_state	= sja1105_mac_pcs_get_state,
	.phylink_mac_config	= sja1105_mac_config,
	.phylink_mac_link_up	= sja1105_mac_link_up,
	.phylink_mac_link_down	= sja1105_mac_link_down,
	.get_strings		= sja1105_get_strings,
	.get_ethtool_stats	= sja1105_get_ethtool_stats,
	.get_sset_count		= sja1105_get_sset_count,
	.get_ts_info		= sja1105_get_ts_info,
	.port_disable		= sja1105_port_disable,
	.port_fdb_dump		= sja1105_fdb_dump,
	.port_fdb_add		= sja1105_fdb_add,
	.port_fdb_del		= sja1105_fdb_del,
	.port_bridge_join	= sja1105_bridge_join,
	.port_bridge_leave	= sja1105_bridge_leave,
	.port_pre_bridge_flags	= sja1105_port_pre_bridge_flags,
	.port_bridge_flags	= sja1105_port_bridge_flags,
	.port_stp_state_set	= sja1105_bridge_stp_state_set,
	.port_vlan_filtering	= sja1105_vlan_filtering,
	.port_vlan_add		= sja1105_vlan_add,
	.port_vlan_del		= sja1105_vlan_del,
	.port_mdb_add		= sja1105_mdb_add,
	.port_mdb_del		= sja1105_mdb_del,
	.port_hwtstamp_get	= sja1105_hwtstamp_get,
	.port_hwtstamp_set	= sja1105_hwtstamp_set,
	.port_rxtstamp		= sja1105_port_rxtstamp,
	.port_txtstamp		= sja1105_port_txtstamp,
	.port_setup_tc		= sja1105_port_setup_tc,
	.port_mirror_add	= sja1105_mirror_add,
	.port_mirror_del	= sja1105_mirror_del,
	.port_policer_add	= sja1105_port_policer_add,
	.port_policer_del	= sja1105_port_policer_del,
	.cls_flower_add		= sja1105_cls_flower_add,
	.cls_flower_del		= sja1105_cls_flower_del,
	.cls_flower_stats	= sja1105_cls_flower_stats,
	.crosschip_bridge_join	= sja1105_crosschip_bridge_join,
	.crosschip_bridge_leave	= sja1105_crosschip_bridge_leave,
	.devlink_param_get	= sja1105_devlink_param_get,
	.devlink_param_set	= sja1105_devlink_param_set,
	.devlink_info_get	= sja1105_devlink_info_get,
};
static const struct of_device_id sja1105_dt_ids[];

static int sja1105_check_device_id(struct sja1105_private *priv)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
	struct device *dev = &priv->spidev->dev;
	const struct of_device_id *match;
	u32 device_id;
	u64 part_no;
	int rc;

	rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
			      NULL);
	if (rc < 0)
		return rc;

	rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
			      SJA1105_SIZE_DEVICE_ID);
	if (rc < 0)
		return rc;

	sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);

	for (match = sja1105_dt_ids; match->compatible[0]; match++) {
		const struct sja1105_info *info = match->data;

		/* Is what's been probed in our match table at all? */
		if (info->device_id != device_id || info->part_no != part_no)
			continue;

		/* But is it what's in the device tree? */
		if (priv->info->device_id != device_id ||
		    priv->info->part_no != part_no) {
			dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n",
				 priv->info->name, info->name);
			/* It isn't. No problem, pick that up. */
			priv->info = info;
		}

		return 0;
	}

	dev_err(dev, "Unexpected {device ID, part number}: 0x%x 0x%llx\n",
		device_id, part_no);

	return -ENODEV;
}
	priv->info = of_device_get_match_data(dev);

	/* Detect hardware device */
	rc = sja1105_check_device_id(priv);
	if (rc < 0) {
		dev_err(dev, "Device ID check failed: %d\n", rc);
		return rc;
	}

	dev_info(dev, "Probed switch chip: %s\n", priv->info->name);

	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return -ENOMEM;

	ds->dev = dev;
	ds->num_ports = SJA1105_MAX_NUM_PORTS;
	ds->ops = &sja1105_switch_ops;
	ds->priv = priv;
	priv->ds = ds;

	tagger_data = &priv->tagger_data;

	mutex_init(&priv->ptp_data.lock);
	mutex_init(&priv->mgmt_lock);

	priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx),
					   GFP_KERNEL);
	if (!priv->dsa_8021q_ctx)
		return -ENOMEM;

	priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops;
	priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q);
	priv->dsa_8021q_ctx->ds = ds;

	INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);
	INIT_LIST_HEAD(&priv->bridge_vlans);
	INIT_LIST_HEAD(&priv->dsa_8021q_vlans);

	sja1105_tas_setup(ds);
	sja1105_flower_setup(ds);

	rc = dsa_register_switch(priv->ds);
	if (rc)
		return rc;

	if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
		priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
					 sizeof(struct sja1105_cbs_entry),
					 GFP_KERNEL);
		if (!priv->cbs) {
			rc = -ENOMEM;
			goto out_unregister_switch;
		}
	}

	/* Connections between dsa_port and sja1105_port */
	for (port = 0; port < ds->num_ports; port++) {
		struct sja1105_port *sp = &priv->ports[port];
		struct dsa_port *dp = dsa_to_port(ds, port);
		struct net_device *slave;
		int subvlan;

		if (!dsa_is_user_port(ds, port))
			continue;

		dp->priv = sp;
		sp->dp = dp;
		sp->data = tagger_data;
		slave = dp->slave;
		kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
		sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
							slave->name);
		if (IS_ERR(sp->xmit_worker)) {
			rc = PTR_ERR(sp->xmit_worker);
			dev_err(ds->dev,
				"failed to create deferred xmit thread: %d\n",
				rc);
			goto out_destroy_workers;
		}
		skb_queue_head_init(&sp->xmit_queue);
		sp->xmit_tpid = ETH_P_SJA1105;

		for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
			sp->subvlan_map[subvlan] = VLAN_N_VID;
	}

	return 0;

out_destroy_workers:
	while (port-- > 0) {
		struct sja1105_port *sp = &priv->ports[port];

		if (!dsa_is_user_port(ds, port))
			continue;

		kthread_destroy_worker(sp->xmit_worker);
	}

out_unregister_switch:
	dsa_unregister_switch(ds);

	return rc;
}
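/* For context, a simplified sketch (based on the sja1105 tagger in
 * net/dsa/tag_sja1105.c) of how the per-port kthread worker created in
 * sja1105_probe() is driven from the transmit path:
 *
 *	skb_queue_tail(&sp->xmit_queue, skb_get(skb));
 *	kthread_queue_work(sp->xmit_worker, &sp->xmit_work);
 *
 * sja1105_port_deferred_xmit() then dequeues the skb and transmits it as an
 * SPI-managed management route frame, something which cannot be done from
 * the tagger's atomic xmit context.
 */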
static int sja1105_remove(struct spi_device *spi)
{
	struct sja1105_private *priv = spi_get_drvdata(spi);

	dsa_unregister_switch(priv->ds);
	return 0;
}

static const struct of_device_id sja1105_dt_ids[] = {
	{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
	{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
	{ .compatible = "nxp,sja1105p", .data = &sja1105p_info },
	{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
	{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
	{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);

static struct spi_driver sja1105_driver = {
	.driver = {
		.name  = "sja1105",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(sja1105_dt_ids),
	},
	.probe  = sja1105_probe,
	.remove = sja1105_remove,
};

module_spi_driver(sja1105_driver);

MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
MODULE_DESCRIPTION("SJA1105 Driver");
MODULE_LICENSE("GPL v2");
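/* Example device tree node (illustrative values only) that would bind to
 * this driver through the "nxp,sja1105t" compatible above; the DSA "ports"
 * subnode with per-port phy-mode properties is omitted for brevity:
 *
 *	&spi1 {
 *		ethernet-switch@0 {
 *			compatible = "nxp,sja1105t";
 *			reg = <0x0>;
 *			spi-max-frequency = <4000000>;
 *			reset-gpios = <&gpio3 10 GPIO_ACTIVE_HIGH>;
 *		};
 *	};
 */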