1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH 3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> 4 */ 5 6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 8 #include <linux/delay.h> 9 #include <linux/module.h> 10 #include <linux/printk.h> 11 #include <linux/spi/spi.h> 12 #include <linux/errno.h> 13 #include <linux/gpio/consumer.h> 14 #include <linux/phylink.h> 15 #include <linux/of.h> 16 #include <linux/of_net.h> 17 #include <linux/of_mdio.h> 18 #include <linux/of_device.h> 19 #include <linux/netdev_features.h> 20 #include <linux/netdevice.h> 21 #include <linux/if_bridge.h> 22 #include <linux/if_ether.h> 23 #include <linux/dsa/8021q.h> 24 #include "sja1105.h" 25 #include "sja1105_sgmii.h" 26 #include "sja1105_tas.h" 27 28 #define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull 29 #define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1) 30 31 static const struct dsa_switch_ops sja1105_switch_ops; 32 33 static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len, 34 unsigned int startup_delay) 35 { 36 gpiod_set_value_cansleep(gpio, 1); 37 /* Wait for minimum reset pulse length */ 38 msleep(pulse_len); 39 gpiod_set_value_cansleep(gpio, 0); 40 /* Wait until chip is ready after reset */ 41 msleep(startup_delay); 42 } 43 44 static void 45 sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd, 46 int from, int to, bool allow) 47 { 48 if (allow) 49 l2_fwd[from].reach_port |= BIT(to); 50 else 51 l2_fwd[from].reach_port &= ~BIT(to); 52 } 53 54 static bool sja1105_can_forward(struct sja1105_l2_forwarding_entry *l2_fwd, 55 int from, int to) 56 { 57 return !!(l2_fwd[from].reach_port & BIT(to)); 58 } 59 60 /* Structure used to temporarily transport device tree 61 * settings into sja1105_setup 62 */ 63 struct sja1105_dt_port { 64 phy_interface_t phy_mode; 65 sja1105_mii_role_t role; 66 }; 67 68 static int sja1105_init_mac_settings(struct sja1105_private *priv) 69 { 70 struct sja1105_mac_config_entry 
default_mac = { 71 /* Enable all 8 priority queues on egress. 72 * Every queue i holds top[i] - base[i] frames. 73 * Sum of top[i] - base[i] is 511 (max hardware limit). 74 */ 75 .top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF}, 76 .base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0}, 77 .enabled = {true, true, true, true, true, true, true, true}, 78 /* Keep standard IFG of 12 bytes on egress. */ 79 .ifg = 0, 80 /* Always put the MAC speed in automatic mode, where it can be 81 * adjusted at runtime by PHYLINK. 82 */ 83 .speed = SJA1105_SPEED_AUTO, 84 /* No static correction for 1-step 1588 events */ 85 .tp_delin = 0, 86 .tp_delout = 0, 87 /* Disable aging for critical TTEthernet traffic */ 88 .maxage = 0xFF, 89 /* Internal VLAN (pvid) to apply to untagged ingress */ 90 .vlanprio = 0, 91 .vlanid = 1, 92 .ing_mirr = false, 93 .egr_mirr = false, 94 /* Don't drop traffic with other EtherType than ETH_P_IP */ 95 .drpnona664 = false, 96 /* Don't drop double-tagged traffic */ 97 .drpdtag = false, 98 /* Don't drop untagged traffic */ 99 .drpuntag = false, 100 /* Don't retag 802.1p (VID 0) traffic with the pvid */ 101 .retag = false, 102 /* Disable learning and I/O on user ports by default - 103 * STP will enable it. 
104 */ 105 .dyn_learn = false, 106 .egress = false, 107 .ingress = false, 108 }; 109 struct sja1105_mac_config_entry *mac; 110 struct sja1105_table *table; 111 int i; 112 113 table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG]; 114 115 /* Discard previous MAC Configuration Table */ 116 if (table->entry_count) { 117 kfree(table->entries); 118 table->entry_count = 0; 119 } 120 121 table->entries = kcalloc(SJA1105_NUM_PORTS, 122 table->ops->unpacked_entry_size, GFP_KERNEL); 123 if (!table->entries) 124 return -ENOMEM; 125 126 table->entry_count = SJA1105_NUM_PORTS; 127 128 mac = table->entries; 129 130 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 131 mac[i] = default_mac; 132 if (i == dsa_upstream_port(priv->ds, i)) { 133 /* STP doesn't get called for CPU port, so we need to 134 * set the I/O parameters statically. 135 */ 136 mac[i].dyn_learn = true; 137 mac[i].ingress = true; 138 mac[i].egress = true; 139 } 140 } 141 142 return 0; 143 } 144 145 static bool sja1105_supports_sgmii(struct sja1105_private *priv, int port) 146 { 147 if (priv->info->part_no != SJA1105R_PART_NO && 148 priv->info->part_no != SJA1105S_PART_NO) 149 return false; 150 151 if (port != SJA1105_SGMII_PORT) 152 return false; 153 154 if (dsa_is_unused_port(priv->ds, port)) 155 return false; 156 157 return true; 158 } 159 160 static int sja1105_init_mii_settings(struct sja1105_private *priv, 161 struct sja1105_dt_port *ports) 162 { 163 struct device *dev = &priv->spidev->dev; 164 struct sja1105_xmii_params_entry *mii; 165 struct sja1105_table *table; 166 int i; 167 168 table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS]; 169 170 /* Discard previous xMII Mode Parameters Table */ 171 if (table->entry_count) { 172 kfree(table->entries); 173 table->entry_count = 0; 174 } 175 176 table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT, 177 table->ops->unpacked_entry_size, GFP_KERNEL); 178 if (!table->entries) 179 return -ENOMEM; 180 181 /* Override table based on PHYLINK DT bindings */ 182 
table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT; 183 184 mii = table->entries; 185 186 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 187 if (dsa_is_unused_port(priv->ds, i)) 188 continue; 189 190 switch (ports[i].phy_mode) { 191 case PHY_INTERFACE_MODE_MII: 192 mii->xmii_mode[i] = XMII_MODE_MII; 193 break; 194 case PHY_INTERFACE_MODE_RMII: 195 mii->xmii_mode[i] = XMII_MODE_RMII; 196 break; 197 case PHY_INTERFACE_MODE_RGMII: 198 case PHY_INTERFACE_MODE_RGMII_ID: 199 case PHY_INTERFACE_MODE_RGMII_RXID: 200 case PHY_INTERFACE_MODE_RGMII_TXID: 201 mii->xmii_mode[i] = XMII_MODE_RGMII; 202 break; 203 case PHY_INTERFACE_MODE_SGMII: 204 if (!sja1105_supports_sgmii(priv, i)) 205 return -EINVAL; 206 mii->xmii_mode[i] = XMII_MODE_SGMII; 207 break; 208 default: 209 dev_err(dev, "Unsupported PHY mode %s!\n", 210 phy_modes(ports[i].phy_mode)); 211 return -EINVAL; 212 } 213 214 /* Even though the SerDes port is able to drive SGMII autoneg 215 * like a PHY would, from the perspective of the XMII tables, 216 * the SGMII port should always be put in MAC mode. 217 */ 218 if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII) 219 mii->phy_mac[i] = XMII_MAC; 220 else 221 mii->phy_mac[i] = ports[i].role; 222 } 223 return 0; 224 } 225 226 static int sja1105_init_static_fdb(struct sja1105_private *priv) 227 { 228 struct sja1105_l2_lookup_entry *l2_lookup; 229 struct sja1105_table *table; 230 int port; 231 232 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; 233 234 /* We only populate the FDB table through dynamic L2 Address Lookup 235 * entries, except for a special entry at the end which is a catch-all 236 * for unknown multicast and will be used to control flooding domain. 
237 */ 238 if (table->entry_count) { 239 kfree(table->entries); 240 table->entry_count = 0; 241 } 242 243 if (!priv->info->can_limit_mcast_flood) 244 return 0; 245 246 table->entries = kcalloc(1, table->ops->unpacked_entry_size, 247 GFP_KERNEL); 248 if (!table->entries) 249 return -ENOMEM; 250 251 table->entry_count = 1; 252 l2_lookup = table->entries; 253 254 /* All L2 multicast addresses have an odd first octet */ 255 l2_lookup[0].macaddr = SJA1105_UNKNOWN_MULTICAST; 256 l2_lookup[0].mask_macaddr = SJA1105_UNKNOWN_MULTICAST; 257 l2_lookup[0].lockeds = true; 258 l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1; 259 260 /* Flood multicast to every port by default */ 261 for (port = 0; port < priv->ds->num_ports; port++) 262 if (!dsa_is_unused_port(priv->ds, port)) 263 l2_lookup[0].destports |= BIT(port); 264 265 return 0; 266 } 267 268 static int sja1105_init_l2_lookup_params(struct sja1105_private *priv) 269 { 270 struct sja1105_table *table; 271 u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS; 272 struct sja1105_l2_lookup_params_entry default_l2_lookup_params = { 273 /* Learned FDB entries are forgotten after 300 seconds */ 274 .maxage = SJA1105_AGEING_TIME_MS(300000), 275 /* All entries within a FDB bin are available for learning */ 276 .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE, 277 /* And the P/Q/R/S equivalent setting: */ 278 .start_dynspc = 0, 279 .maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries, 280 max_fdb_entries, max_fdb_entries, }, 281 /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */ 282 .poly = 0x97, 283 /* This selects between Independent VLAN Learning (IVL) and 284 * Shared VLAN Learning (SVL) 285 */ 286 .shared_learn = true, 287 /* Don't discard management traffic based on ENFPORT - 288 * we don't perform SMAC port enforcement anyway, so 289 * what we are setting here doesn't matter. 290 */ 291 .no_enf_hostprt = false, 292 /* Don't learn SMAC for mac_fltres1 and mac_fltres0. 
293 * Maybe correlate with no_linklocal_learn from bridge driver? 294 */ 295 .no_mgmt_learn = true, 296 /* P/Q/R/S only */ 297 .use_static = true, 298 /* Dynamically learned FDB entries can overwrite other (older) 299 * dynamic FDB entries 300 */ 301 .owr_dyn = true, 302 .drpnolearn = true, 303 }; 304 305 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; 306 307 if (table->entry_count) { 308 kfree(table->entries); 309 table->entry_count = 0; 310 } 311 312 table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT, 313 table->ops->unpacked_entry_size, GFP_KERNEL); 314 if (!table->entries) 315 return -ENOMEM; 316 317 table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT; 318 319 /* This table only has a single entry */ 320 ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] = 321 default_l2_lookup_params; 322 323 return 0; 324 } 325 326 /* Set up a default VLAN for untagged traffic injected from the CPU 327 * using management routes (e.g. STP, PTP) as opposed to tag_8021q. 328 * All DT-defined ports are members of this VLAN, and there are no 329 * restrictions on forwarding (since the CPU selects the destination). 330 * Frames from this VLAN will always be transmitted as untagged, and 331 * neither the bridge nor the 8021q module cannot create this VLAN ID. 
332 */ 333 static int sja1105_init_static_vlan(struct sja1105_private *priv) 334 { 335 struct sja1105_table *table; 336 struct sja1105_vlan_lookup_entry pvid = { 337 .ving_mirr = 0, 338 .vegr_mirr = 0, 339 .vmemb_port = 0, 340 .vlan_bc = 0, 341 .tag_port = 0, 342 .vlanid = SJA1105_DEFAULT_VLAN, 343 }; 344 struct dsa_switch *ds = priv->ds; 345 int port; 346 347 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; 348 349 if (table->entry_count) { 350 kfree(table->entries); 351 table->entry_count = 0; 352 } 353 354 table->entries = kzalloc(table->ops->unpacked_entry_size, 355 GFP_KERNEL); 356 if (!table->entries) 357 return -ENOMEM; 358 359 table->entry_count = 1; 360 361 for (port = 0; port < ds->num_ports; port++) { 362 struct sja1105_bridge_vlan *v; 363 364 if (dsa_is_unused_port(ds, port)) 365 continue; 366 367 pvid.vmemb_port |= BIT(port); 368 pvid.vlan_bc |= BIT(port); 369 pvid.tag_port &= ~BIT(port); 370 371 v = kzalloc(sizeof(*v), GFP_KERNEL); 372 if (!v) 373 return -ENOMEM; 374 375 v->port = port; 376 v->vid = SJA1105_DEFAULT_VLAN; 377 v->untagged = true; 378 if (dsa_is_cpu_port(ds, port)) 379 v->pvid = true; 380 list_add(&v->list, &priv->dsa_8021q_vlans); 381 } 382 383 ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid; 384 return 0; 385 } 386 387 static int sja1105_init_l2_forwarding(struct sja1105_private *priv) 388 { 389 struct sja1105_l2_forwarding_entry *l2fwd; 390 struct sja1105_table *table; 391 int i, j; 392 393 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING]; 394 395 if (table->entry_count) { 396 kfree(table->entries); 397 table->entry_count = 0; 398 } 399 400 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT, 401 table->ops->unpacked_entry_size, GFP_KERNEL); 402 if (!table->entries) 403 return -ENOMEM; 404 405 table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT; 406 407 l2fwd = table->entries; 408 409 /* First 5 entries define the forwarding rules */ 410 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 411 unsigned int 
upstream = dsa_upstream_port(priv->ds, i); 412 413 for (j = 0; j < SJA1105_NUM_TC; j++) 414 l2fwd[i].vlan_pmap[j] = j; 415 416 /* All ports start up with egress flooding enabled, 417 * including the CPU port. 418 */ 419 priv->ucast_egress_floods |= BIT(i); 420 priv->bcast_egress_floods |= BIT(i); 421 422 if (i == upstream) 423 continue; 424 425 sja1105_port_allow_traffic(l2fwd, i, upstream, true); 426 sja1105_port_allow_traffic(l2fwd, upstream, i, true); 427 428 l2fwd[i].bc_domain = BIT(upstream); 429 l2fwd[i].fl_domain = BIT(upstream); 430 431 l2fwd[upstream].bc_domain |= BIT(i); 432 l2fwd[upstream].fl_domain |= BIT(i); 433 } 434 /* Next 8 entries define VLAN PCP mapping from ingress to egress. 435 * Create a one-to-one mapping. 436 */ 437 for (i = 0; i < SJA1105_NUM_TC; i++) 438 for (j = 0; j < SJA1105_NUM_PORTS; j++) 439 l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i; 440 441 return 0; 442 } 443 444 static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv) 445 { 446 struct sja1105_l2_forwarding_params_entry default_l2fwd_params = { 447 /* Disallow dynamic reconfiguration of vlan_pmap */ 448 .max_dynp = 0, 449 /* Use a single memory partition for all ingress queues */ 450 .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 }, 451 }; 452 struct sja1105_table *table; 453 454 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; 455 456 if (table->entry_count) { 457 kfree(table->entries); 458 table->entry_count = 0; 459 } 460 461 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT, 462 table->ops->unpacked_entry_size, GFP_KERNEL); 463 if (!table->entries) 464 return -ENOMEM; 465 466 table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT; 467 468 /* This table only has a single entry */ 469 ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] = 470 default_l2fwd_params; 471 472 return 0; 473 } 474 475 void sja1105_frame_memory_partitioning(struct sja1105_private *priv) 476 { 477 struct 
sja1105_l2_forwarding_params_entry *l2_fwd_params; 478 struct sja1105_vl_forwarding_params_entry *vl_fwd_params; 479 struct sja1105_table *table; 480 int max_mem; 481 482 /* VLAN retagging is implemented using a loopback port that consumes 483 * frame buffers. That leaves less for us. 484 */ 485 if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT) 486 max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING; 487 else 488 max_mem = SJA1105_MAX_FRAME_MEMORY; 489 490 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; 491 l2_fwd_params = table->entries; 492 l2_fwd_params->part_spc[0] = max_mem; 493 494 /* If we have any critical-traffic virtual links, we need to reserve 495 * some frame buffer memory for them. At the moment, hardcode the value 496 * at 100 blocks of 128 bytes of memory each. This leaves 829 blocks 497 * remaining for best-effort traffic. TODO: figure out a more flexible 498 * way to perform the frame buffer partitioning. 499 */ 500 if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count) 501 return; 502 503 table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS]; 504 vl_fwd_params = table->entries; 505 506 l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY; 507 vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY; 508 } 509 510 static int sja1105_init_general_params(struct sja1105_private *priv) 511 { 512 struct sja1105_general_params_entry default_general_params = { 513 /* Allow dynamic changing of the mirror port */ 514 .mirr_ptacu = true, 515 .switchid = priv->ds->index, 516 /* Priority queue for link-local management frames 517 * (both ingress to and egress from CPU - PTP, STP etc) 518 */ 519 .hostprio = 7, 520 .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A, 521 .mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK, 522 .incl_srcpt1 = false, 523 .send_meta1 = false, 524 .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B, 525 .mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK, 526 .incl_srcpt0 = false, 527 .send_meta0 = false, 528 /* The destination for 
traffic matching mac_fltres1 and 529 * mac_fltres0 on all ports except host_port. Such traffic 530 * receieved on host_port itself would be dropped, except 531 * by installing a temporary 'management route' 532 */ 533 .host_port = dsa_upstream_port(priv->ds, 0), 534 /* Default to an invalid value */ 535 .mirr_port = SJA1105_NUM_PORTS, 536 /* Link-local traffic received on casc_port will be forwarded 537 * to host_port without embedding the source port and device ID 538 * info in the destination MAC address (presumably because it 539 * is a cascaded port and a downstream SJA switch already did 540 * that). Default to an invalid port (to disable the feature) 541 * and overwrite this if we find any DSA (cascaded) ports. 542 */ 543 .casc_port = SJA1105_NUM_PORTS, 544 /* No TTEthernet */ 545 .vllupformat = SJA1105_VL_FORMAT_PSFP, 546 .vlmarker = 0, 547 .vlmask = 0, 548 /* Only update correctionField for 1-step PTP (L2 transport) */ 549 .ignore2stf = 0, 550 /* Forcefully disable VLAN filtering by telling 551 * the switch that VLAN has a different EtherType. 
552 */ 553 .tpid = ETH_P_SJA1105, 554 .tpid2 = ETH_P_SJA1105, 555 }; 556 struct sja1105_table *table; 557 558 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; 559 560 if (table->entry_count) { 561 kfree(table->entries); 562 table->entry_count = 0; 563 } 564 565 table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT, 566 table->ops->unpacked_entry_size, GFP_KERNEL); 567 if (!table->entries) 568 return -ENOMEM; 569 570 table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT; 571 572 /* This table only has a single entry */ 573 ((struct sja1105_general_params_entry *)table->entries)[0] = 574 default_general_params; 575 576 return 0; 577 } 578 579 static int sja1105_init_avb_params(struct sja1105_private *priv) 580 { 581 struct sja1105_avb_params_entry *avb; 582 struct sja1105_table *table; 583 584 table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS]; 585 586 /* Discard previous AVB Parameters Table */ 587 if (table->entry_count) { 588 kfree(table->entries); 589 table->entry_count = 0; 590 } 591 592 table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT, 593 table->ops->unpacked_entry_size, GFP_KERNEL); 594 if (!table->entries) 595 return -ENOMEM; 596 597 table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT; 598 599 avb = table->entries; 600 601 /* Configure the MAC addresses for meta frames */ 602 avb->destmeta = SJA1105_META_DMAC; 603 avb->srcmeta = SJA1105_META_SMAC; 604 /* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by 605 * default. This is because there might be boards with a hardware 606 * layout where enabling the pin as output might cause an electrical 607 * clash. On E/T the pin is always an output, which the board designers 608 * probably already knew, so even if there are going to be electrical 609 * issues, there's nothing we can do. 610 */ 611 avb->cas_master = false; 612 613 return 0; 614 } 615 616 /* The L2 policing table is 2-stage. 
The table is looked up for each frame 617 * according to the ingress port, whether it was broadcast or not, and the 618 * classified traffic class (given by VLAN PCP). This portion of the lookup is 619 * fixed, and gives access to the SHARINDX, an indirection register pointing 620 * within the policing table itself, which is used to resolve the policer that 621 * will be used for this frame. 622 * 623 * Stage 1 Stage 2 624 * +------------+--------+ +---------------------------------+ 625 * |Port 0 TC 0 |SHARINDX| | Policer 0: Rate, Burst, MTU | 626 * +------------+--------+ +---------------------------------+ 627 * |Port 0 TC 1 |SHARINDX| | Policer 1: Rate, Burst, MTU | 628 * +------------+--------+ +---------------------------------+ 629 * ... | Policer 2: Rate, Burst, MTU | 630 * +------------+--------+ +---------------------------------+ 631 * |Port 0 TC 7 |SHARINDX| | Policer 3: Rate, Burst, MTU | 632 * +------------+--------+ +---------------------------------+ 633 * |Port 1 TC 0 |SHARINDX| | Policer 4: Rate, Burst, MTU | 634 * +------------+--------+ +---------------------------------+ 635 * ... | Policer 5: Rate, Burst, MTU | 636 * +------------+--------+ +---------------------------------+ 637 * |Port 1 TC 7 |SHARINDX| | Policer 6: Rate, Burst, MTU | 638 * +------------+--------+ +---------------------------------+ 639 * ... | Policer 7: Rate, Burst, MTU | 640 * +------------+--------+ +---------------------------------+ 641 * |Port 4 TC 7 |SHARINDX| ... 642 * +------------+--------+ 643 * |Port 0 BCAST|SHARINDX| ... 644 * +------------+--------+ 645 * |Port 1 BCAST|SHARINDX| ... 646 * +------------+--------+ 647 * ... ... 648 * +------------+--------+ +---------------------------------+ 649 * |Port 4 BCAST|SHARINDX| | Policer 44: Rate, Burst, MTU | 650 * +------------+--------+ +---------------------------------+ 651 * 652 * In this driver, we shall use policers 0-4 as statically alocated port 653 * (matchall) policers. 
So we need to make the SHARINDX for all lookups 654 * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast 655 * lookup) equal. 656 * The remaining policers (40) shall be dynamically allocated for flower 657 * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff. 658 */ 659 #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000) 660 661 static int sja1105_init_l2_policing(struct sja1105_private *priv) 662 { 663 struct sja1105_l2_policing_entry *policing; 664 struct sja1105_table *table; 665 int port, tc; 666 667 table = &priv->static_config.tables[BLK_IDX_L2_POLICING]; 668 669 /* Discard previous L2 Policing Table */ 670 if (table->entry_count) { 671 kfree(table->entries); 672 table->entry_count = 0; 673 } 674 675 table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT, 676 table->ops->unpacked_entry_size, GFP_KERNEL); 677 if (!table->entries) 678 return -ENOMEM; 679 680 table->entry_count = SJA1105_MAX_L2_POLICING_COUNT; 681 682 policing = table->entries; 683 684 /* Setup shared indices for the matchall policers */ 685 for (port = 0; port < SJA1105_NUM_PORTS; port++) { 686 int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port; 687 688 for (tc = 0; tc < SJA1105_NUM_TC; tc++) 689 policing[port * SJA1105_NUM_TC + tc].sharindx = port; 690 691 policing[bcast].sharindx = port; 692 } 693 694 /* Setup the matchall policer parameters */ 695 for (port = 0; port < SJA1105_NUM_PORTS; port++) { 696 int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; 697 698 if (dsa_is_cpu_port(priv->ds, port)) 699 mtu += VLAN_HLEN; 700 701 policing[port].smax = 65535; /* Burst size in bytes */ 702 policing[port].rate = SJA1105_RATE_MBPS(1000); 703 policing[port].maxlen = mtu; 704 policing[port].partition = 0; 705 } 706 707 return 0; 708 } 709 710 static int sja1105_static_config_load(struct sja1105_private *priv, 711 struct sja1105_dt_port *ports) 712 { 713 int rc; 714 715 sja1105_static_config_free(&priv->static_config); 716 rc = 
sja1105_static_config_init(&priv->static_config, 717 priv->info->static_ops, 718 priv->info->device_id); 719 if (rc) 720 return rc; 721 722 /* Build static configuration */ 723 rc = sja1105_init_mac_settings(priv); 724 if (rc < 0) 725 return rc; 726 rc = sja1105_init_mii_settings(priv, ports); 727 if (rc < 0) 728 return rc; 729 rc = sja1105_init_static_fdb(priv); 730 if (rc < 0) 731 return rc; 732 rc = sja1105_init_static_vlan(priv); 733 if (rc < 0) 734 return rc; 735 rc = sja1105_init_l2_lookup_params(priv); 736 if (rc < 0) 737 return rc; 738 rc = sja1105_init_l2_forwarding(priv); 739 if (rc < 0) 740 return rc; 741 rc = sja1105_init_l2_forwarding_params(priv); 742 if (rc < 0) 743 return rc; 744 rc = sja1105_init_l2_policing(priv); 745 if (rc < 0) 746 return rc; 747 rc = sja1105_init_general_params(priv); 748 if (rc < 0) 749 return rc; 750 rc = sja1105_init_avb_params(priv); 751 if (rc < 0) 752 return rc; 753 754 /* Send initial configuration to hardware via SPI */ 755 return sja1105_static_config_upload(priv); 756 } 757 758 static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, 759 const struct sja1105_dt_port *ports) 760 { 761 int i; 762 763 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 764 if (ports[i].role == XMII_MAC) 765 continue; 766 767 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID || 768 ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID) 769 priv->rgmii_rx_delay[i] = true; 770 771 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID || 772 ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID) 773 priv->rgmii_tx_delay[i] = true; 774 775 if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) && 776 !priv->info->setup_rgmii_delay) 777 return -EINVAL; 778 } 779 return 0; 780 } 781 782 static int sja1105_parse_ports_node(struct sja1105_private *priv, 783 struct sja1105_dt_port *ports, 784 struct device_node *ports_node) 785 { 786 struct device *dev = &priv->spidev->dev; 787 struct device_node *child; 788 789 
for_each_available_child_of_node(ports_node, child) { 790 struct device_node *phy_node; 791 phy_interface_t phy_mode; 792 u32 index; 793 int err; 794 795 /* Get switch port number from DT */ 796 if (of_property_read_u32(child, "reg", &index) < 0) { 797 dev_err(dev, "Port number not defined in device tree " 798 "(property \"reg\")\n"); 799 of_node_put(child); 800 return -ENODEV; 801 } 802 803 /* Get PHY mode from DT */ 804 err = of_get_phy_mode(child, &phy_mode); 805 if (err) { 806 dev_err(dev, "Failed to read phy-mode or " 807 "phy-interface-type property for port %d\n", 808 index); 809 of_node_put(child); 810 return -ENODEV; 811 } 812 ports[index].phy_mode = phy_mode; 813 814 phy_node = of_parse_phandle(child, "phy-handle", 0); 815 if (!phy_node) { 816 if (!of_phy_is_fixed_link(child)) { 817 dev_err(dev, "phy-handle or fixed-link " 818 "properties missing!\n"); 819 of_node_put(child); 820 return -ENODEV; 821 } 822 /* phy-handle is missing, but fixed-link isn't. 823 * So it's a fixed link. Default to PHY role. 
824 */ 825 ports[index].role = XMII_PHY; 826 } else { 827 /* phy-handle present => put port in MAC role */ 828 ports[index].role = XMII_MAC; 829 of_node_put(phy_node); 830 } 831 832 /* The MAC/PHY role can be overridden with explicit bindings */ 833 if (of_property_read_bool(child, "sja1105,role-mac")) 834 ports[index].role = XMII_MAC; 835 else if (of_property_read_bool(child, "sja1105,role-phy")) 836 ports[index].role = XMII_PHY; 837 } 838 839 return 0; 840 } 841 842 static int sja1105_parse_dt(struct sja1105_private *priv, 843 struct sja1105_dt_port *ports) 844 { 845 struct device *dev = &priv->spidev->dev; 846 struct device_node *switch_node = dev->of_node; 847 struct device_node *ports_node; 848 int rc; 849 850 ports_node = of_get_child_by_name(switch_node, "ports"); 851 if (!ports_node) { 852 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n"); 853 return -ENODEV; 854 } 855 856 rc = sja1105_parse_ports_node(priv, ports, ports_node); 857 of_node_put(ports_node); 858 859 return rc; 860 } 861 862 static int sja1105_sgmii_read(struct sja1105_private *priv, int pcs_reg) 863 { 864 const struct sja1105_regs *regs = priv->info->regs; 865 u32 val; 866 int rc; 867 868 rc = sja1105_xfer_u32(priv, SPI_READ, regs->sgmii + pcs_reg, &val, 869 NULL); 870 if (rc < 0) 871 return rc; 872 873 return val; 874 } 875 876 static int sja1105_sgmii_write(struct sja1105_private *priv, int pcs_reg, 877 u16 pcs_val) 878 { 879 const struct sja1105_regs *regs = priv->info->regs; 880 u32 val = pcs_val; 881 int rc; 882 883 rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->sgmii + pcs_reg, &val, 884 NULL); 885 if (rc < 0) 886 return rc; 887 888 return val; 889 } 890 891 static void sja1105_sgmii_pcs_config(struct sja1105_private *priv, 892 bool an_enabled, bool an_master) 893 { 894 u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII; 895 896 /* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to 897 * stop the clock during LPI mode, make the MAC reconfigure 898 * autonomously after PCS 
autoneg is done, flush the internal FIFOs. 899 */ 900 sja1105_sgmii_write(priv, SJA1105_DC1, SJA1105_DC1_EN_VSMMD1 | 901 SJA1105_DC1_CLOCK_STOP_EN | 902 SJA1105_DC1_MAC_AUTO_SW | 903 SJA1105_DC1_INIT); 904 /* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */ 905 sja1105_sgmii_write(priv, SJA1105_DC2, SJA1105_DC2_TX_POL_INV_DISABLE); 906 /* AUTONEG_CONTROL: Use SGMII autoneg */ 907 if (an_master) 908 ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK; 909 sja1105_sgmii_write(priv, SJA1105_AC, ac); 910 /* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise, 911 * sja1105_sgmii_pcs_force_speed must be called later for the link 912 * to become operational. 913 */ 914 if (an_enabled) 915 sja1105_sgmii_write(priv, MII_BMCR, 916 BMCR_ANENABLE | BMCR_ANRESTART); 917 } 918 919 static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv, 920 int speed) 921 { 922 int pcs_speed; 923 924 switch (speed) { 925 case SPEED_1000: 926 pcs_speed = BMCR_SPEED1000; 927 break; 928 case SPEED_100: 929 pcs_speed = BMCR_SPEED100; 930 break; 931 case SPEED_10: 932 pcs_speed = BMCR_SPEED10; 933 break; 934 default: 935 dev_err(priv->ds->dev, "Invalid speed %d\n", speed); 936 return; 937 } 938 sja1105_sgmii_write(priv, MII_BMCR, pcs_speed | BMCR_FULLDPLX); 939 } 940 941 /* Convert link speed from SJA1105 to ethtool encoding */ 942 static int sja1105_speed[] = { 943 [SJA1105_SPEED_AUTO] = SPEED_UNKNOWN, 944 [SJA1105_SPEED_10MBPS] = SPEED_10, 945 [SJA1105_SPEED_100MBPS] = SPEED_100, 946 [SJA1105_SPEED_1000MBPS] = SPEED_1000, 947 }; 948 949 /* Set link speed in the MAC configuration for a specific port. 
*/ 950 static int sja1105_adjust_port_config(struct sja1105_private *priv, int port, 951 int speed_mbps) 952 { 953 struct sja1105_xmii_params_entry *mii; 954 struct sja1105_mac_config_entry *mac; 955 struct device *dev = priv->ds->dev; 956 sja1105_phy_interface_t phy_mode; 957 sja1105_speed_t speed; 958 int rc; 959 960 /* On P/Q/R/S, one can read from the device via the MAC reconfiguration 961 * tables. On E/T, MAC reconfig tables are not readable, only writable. 962 * We have to *know* what the MAC looks like. For the sake of keeping 963 * the code common, we'll use the static configuration tables as a 964 * reasonable approximation for both E/T and P/Q/R/S. 965 */ 966 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 967 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; 968 969 switch (speed_mbps) { 970 case SPEED_UNKNOWN: 971 /* PHYLINK called sja1105_mac_config() to inform us about 972 * the state->interface, but AN has not completed and the 973 * speed is not yet valid. UM10944.pdf says that setting 974 * SJA1105_SPEED_AUTO at runtime disables the port, so that is 975 * ok for power consumption in case AN will never complete - 976 * otherwise PHYLINK should come back with a new update. 977 */ 978 speed = SJA1105_SPEED_AUTO; 979 break; 980 case SPEED_10: 981 speed = SJA1105_SPEED_10MBPS; 982 break; 983 case SPEED_100: 984 speed = SJA1105_SPEED_100MBPS; 985 break; 986 case SPEED_1000: 987 speed = SJA1105_SPEED_1000MBPS; 988 break; 989 default: 990 dev_err(dev, "Invalid speed %iMbps\n", speed_mbps); 991 return -EINVAL; 992 } 993 994 /* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration 995 * table, since this will be used for the clocking setup, and we no 996 * longer need to store it in the static config (already told hardware 997 * we want auto during upload phase). 998 * Actually for the SGMII port, the MAC is fixed at 1 Gbps and 999 * we need to configure the PCS only (if even that). 
1000 */ 1001 if (sja1105_supports_sgmii(priv, port)) 1002 mac[port].speed = SJA1105_SPEED_1000MBPS; 1003 else 1004 mac[port].speed = speed; 1005 1006 /* Write to the dynamic reconfiguration tables */ 1007 rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, 1008 &mac[port], true); 1009 if (rc < 0) { 1010 dev_err(dev, "Failed to write MAC config: %d\n", rc); 1011 return rc; 1012 } 1013 1014 /* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at 1015 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and 1016 * RMII no change of the clock setup is required. Actually, changing 1017 * the clock setup does interrupt the clock signal for a certain time 1018 * which causes trouble for all PHYs relying on this signal. 1019 */ 1020 phy_mode = mii->xmii_mode[port]; 1021 if (phy_mode != XMII_MODE_RGMII) 1022 return 0; 1023 1024 return sja1105_clocking_setup_port(priv, port); 1025 } 1026 1027 /* The SJA1105 MAC programming model is through the static config (the xMII 1028 * Mode table cannot be dynamically reconfigured), and we have to program 1029 * that early (earlier than PHYLINK calls us, anyway). 1030 * So just error out in case the connected PHY attempts to change the initial 1031 * system interface MII protocol from what is defined in the DT, at least for 1032 * now. 
 */
/* Return true if @interface (requested by PHYLINK) disagrees with the xMII
 * mode committed in the static config for this port.
 */
static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
                                      phy_interface_t interface)
{
        struct sja1105_xmii_params_entry *mii;
        sja1105_phy_interface_t phy_mode;

        mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
        phy_mode = mii->xmii_mode[port];

        switch (interface) {
        case PHY_INTERFACE_MODE_MII:
                return (phy_mode != XMII_MODE_MII);
        case PHY_INTERFACE_MODE_RMII:
                return (phy_mode != XMII_MODE_RMII);
        case PHY_INTERFACE_MODE_RGMII:
        case PHY_INTERFACE_MODE_RGMII_ID:
        case PHY_INTERFACE_MODE_RGMII_RXID:
        case PHY_INTERFACE_MODE_RGMII_TXID:
                return (phy_mode != XMII_MODE_RGMII);
        case PHY_INTERFACE_MODE_SGMII:
                return (phy_mode != XMII_MODE_SGMII);
        default:
                /* Anything not listed above cannot match the static config */
                return true;
        }
}

/* PHYLINK mac_config callback: validate the requested interface/AN mode and,
 * for the SGMII port, (re)program the PCS. Speed is handled in mac_link_up.
 */
static void sja1105_mac_config(struct dsa_switch *ds, int port,
                               unsigned int mode,
                               const struct phylink_link_state *state)
{
        struct sja1105_private *priv = ds->priv;
        bool is_sgmii = sja1105_supports_sgmii(priv, port);

        if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
                dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
                        phy_modes(state->interface));
                return;
        }

        if (phylink_autoneg_inband(mode) && !is_sgmii) {
                dev_err(ds->dev, "In-band AN not supported!\n");
                return;
        }

        if (is_sgmii)
                sja1105_sgmii_pcs_config(priv, phylink_autoneg_inband(mode),
                                         false);
}

/* PHYLINK mac_link_down callback: stop transmission on the port */
static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
                                  unsigned int mode,
                                  phy_interface_t interface)
{
        sja1105_inhibit_tx(ds->priv, BIT(port), true);
}

/* PHYLINK mac_link_up callback: commit the resolved speed to the MAC (and
 * SGMII PCS when AN is not in-band), then re-enable transmission.
 */
static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
                                unsigned int mode,
                                phy_interface_t interface,
                                struct phy_device *phydev,
                                int speed, int duplex,
                                bool tx_pause, bool rx_pause)
{
        struct sja1105_private *priv = ds->priv;

        sja1105_adjust_port_config(priv, port, speed);

        if (sja1105_supports_sgmii(priv, port) && !phylink_autoneg_inband(mode))
                sja1105_sgmii_pcs_force_speed(priv, speed);

        sja1105_inhibit_tx(priv, BIT(port), false);
}

static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
                                     unsigned long *supported,
                                     struct phylink_link_state *state)
{
        /* Construct a new mask which exhaustively contains all link features
         * supported by the MAC, and then apply that (logical AND) to what will
         * be sent to the PHY for "marketing".
         */
        __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
        struct sja1105_private *priv = ds->priv;
        struct sja1105_xmii_params_entry *mii;

        mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

        /* include/linux/phylink.h says:
         *     When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
         *     expects the MAC driver to return all supported link modes.
         */
        if (state->interface != PHY_INTERFACE_MODE_NA &&
            sja1105_phy_mode_mismatch(priv, port, state->interface)) {
                bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
                return;
        }

        /* The MAC does not support pause frames, and also doesn't
         * support half-duplex traffic modes.
         */
        phylink_set(mask, Autoneg);
        phylink_set(mask, MII);
        phylink_set(mask, 10baseT_Full);
        phylink_set(mask, 100baseT_Full);
        phylink_set(mask, 100baseT1_Full);
        /* Only RGMII and SGMII ports can do gigabit */
        if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
            mii->xmii_mode[port] == XMII_MODE_SGMII)
                phylink_set(mask, 1000baseT_Full);

        bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
        bitmap_and(state->advertising, state->advertising, mask,
                   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

/* Report link state resolved by the SGMII PCS after in-band autoneg */
static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
                                     struct phylink_link_state *state)
{
        struct sja1105_private *priv = ds->priv;
        int ais;

        /* Read the vendor-specific AUTONEG_INTR_STATUS register */
        ais = sja1105_sgmii_read(priv, SJA1105_AIS);
        if (ais < 0)
                return ais;

        switch (SJA1105_AIS_SPEED(ais)) {
        case 0:
                state->speed = SPEED_10;
                break;
        case 1:
                state->speed = SPEED_100;
                break;
        case 2:
                state->speed = SPEED_1000;
                break;
        default:
                /* NOTE(review): speed is left unset here; the remaining
                 * fields are still populated from the AIS register.
                 */
                dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
                        SJA1105_AIS_SPEED(ais));
        }
        state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
        state->an_complete = SJA1105_AIS_COMPLETE(ais);
        state->link = SJA1105_AIS_LINK_STATUS(ais);

        return 0;
}

/* Linear search of the static L2 Lookup table for an entry matching
 * @requested's MAC address and VLAN ID that reaches @port.
 * Returns the entry index, or -1 if not found.
 */
static int
sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
                              const struct sja1105_l2_lookup_entry *requested)
{
        struct sja1105_l2_lookup_entry *l2_lookup;
        struct sja1105_table *table;
        int i;

        table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
        l2_lookup = table->entries;

        for (i = 0; i < table->entry_count; i++)
                if (l2_lookup[i].macaddr == requested->macaddr &&
                    l2_lookup[i].vlanid == requested->vlanid &&
                    l2_lookup[i].destports & BIT(port))
                        return i;

        return -1;
}

/* We want FDB entries added statically through
the bridge command to persist
 * across switch resets, which are a common thing during normal SJA1105
 * operation. So we have to back them up in the static configuration tables
 * and hence apply them on next static config upload... yay!
 */
static int
sja1105_static_fdb_change(struct sja1105_private *priv, int port,
                          const struct sja1105_l2_lookup_entry *requested,
                          bool keep)
{
        struct sja1105_l2_lookup_entry *entries;
        struct sja1105_table *table;
        int slot, rc;

        table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];

        slot = sja1105_find_static_fdb_entry(priv, port, requested);
        if (slot < 0) {
                /* Deleting an entry that was never backed up: nothing to do */
                if (!keep)
                        return 0;

                /* New entry: grow the table by one slot for it */
                rc = sja1105_table_resize(table, table->entry_count + 1);
                if (rc)
                        return rc;

                slot = table->entry_count - 1;
        }

        /* Re-read the entry array: the resize may have reallocated it */
        entries = table->entries;

        if (keep) {
                /* Insert the new entry, or refresh the backed-up copy of an
                 * existing one (its port forwarding mask may have changed).
                 */
                entries[slot] = *requested;
                return 0;
        }

        /* Delete: move the last element into the vacated slot, then shrink
         * the array by one.
         */
        entries[slot] = entries[table->entry_count - 1];
        return sja1105_table_resize(table, table->entry_count - 1);
}

/* First-generation switches have a 4-way set associative TCAM that
 * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
1254 * For the placement of a newly learnt FDB entry, the switch selects the bin 1255 * based on a hash function, and the way within that bin incrementally. 1256 */ 1257 static int sja1105et_fdb_index(int bin, int way) 1258 { 1259 return bin * SJA1105ET_FDB_BIN_SIZE + way; 1260 } 1261 1262 static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin, 1263 const u8 *addr, u16 vid, 1264 struct sja1105_l2_lookup_entry *match, 1265 int *last_unused) 1266 { 1267 int way; 1268 1269 for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) { 1270 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1271 int index = sja1105et_fdb_index(bin, way); 1272 1273 /* Skip unused entries, optionally marking them 1274 * into the return value 1275 */ 1276 if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1277 index, &l2_lookup)) { 1278 if (last_unused) 1279 *last_unused = way; 1280 continue; 1281 } 1282 1283 if (l2_lookup.macaddr == ether_addr_to_u64(addr) && 1284 l2_lookup.vlanid == vid) { 1285 if (match) 1286 *match = l2_lookup; 1287 return way; 1288 } 1289 } 1290 /* Return an invalid entry index if not found */ 1291 return -1; 1292 } 1293 1294 int sja1105et_fdb_add(struct dsa_switch *ds, int port, 1295 const unsigned char *addr, u16 vid) 1296 { 1297 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1298 struct sja1105_private *priv = ds->priv; 1299 struct device *dev = ds->dev; 1300 int last_unused = -1; 1301 int bin, way, rc; 1302 1303 bin = sja1105et_fdb_hash(priv, addr, vid); 1304 1305 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid, 1306 &l2_lookup, &last_unused); 1307 if (way >= 0) { 1308 /* We have an FDB entry. Is our port in the destination 1309 * mask? If yes, we need to do nothing. If not, we need 1310 * to rewrite the entry by adding this port to it. 
1311 */ 1312 if (l2_lookup.destports & BIT(port)) 1313 return 0; 1314 l2_lookup.destports |= BIT(port); 1315 } else { 1316 int index = sja1105et_fdb_index(bin, way); 1317 1318 /* We don't have an FDB entry. We construct a new one and 1319 * try to find a place for it within the FDB table. 1320 */ 1321 l2_lookup.macaddr = ether_addr_to_u64(addr); 1322 l2_lookup.destports = BIT(port); 1323 l2_lookup.vlanid = vid; 1324 1325 if (last_unused >= 0) { 1326 way = last_unused; 1327 } else { 1328 /* Bin is full, need to evict somebody. 1329 * Choose victim at random. If you get these messages 1330 * often, you may need to consider changing the 1331 * distribution function: 1332 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly 1333 */ 1334 get_random_bytes(&way, sizeof(u8)); 1335 way %= SJA1105ET_FDB_BIN_SIZE; 1336 dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n", 1337 bin, addr, way); 1338 /* Evict entry */ 1339 sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1340 index, NULL, false); 1341 } 1342 } 1343 l2_lookup.index = sja1105et_fdb_index(bin, way); 1344 1345 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1346 l2_lookup.index, &l2_lookup, 1347 true); 1348 if (rc < 0) 1349 return rc; 1350 1351 return sja1105_static_fdb_change(priv, port, &l2_lookup, true); 1352 } 1353 1354 int sja1105et_fdb_del(struct dsa_switch *ds, int port, 1355 const unsigned char *addr, u16 vid) 1356 { 1357 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1358 struct sja1105_private *priv = ds->priv; 1359 int index, bin, way, rc; 1360 bool keep; 1361 1362 bin = sja1105et_fdb_hash(priv, addr, vid); 1363 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid, 1364 &l2_lookup, NULL); 1365 if (way < 0) 1366 return 0; 1367 index = sja1105et_fdb_index(bin, way); 1368 1369 /* We have an FDB entry. Is our port in the destination mask? If yes, 1370 * we need to remove it. 
If the resulting port mask becomes empty, we 1371 * need to completely evict the FDB entry. 1372 * Otherwise we just write it back. 1373 */ 1374 l2_lookup.destports &= ~BIT(port); 1375 1376 if (l2_lookup.destports) 1377 keep = true; 1378 else 1379 keep = false; 1380 1381 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1382 index, &l2_lookup, keep); 1383 if (rc < 0) 1384 return rc; 1385 1386 return sja1105_static_fdb_change(priv, port, &l2_lookup, keep); 1387 } 1388 1389 int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port, 1390 const unsigned char *addr, u16 vid) 1391 { 1392 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1393 struct sja1105_private *priv = ds->priv; 1394 int rc, i; 1395 1396 /* Search for an existing entry in the FDB table */ 1397 l2_lookup.macaddr = ether_addr_to_u64(addr); 1398 l2_lookup.vlanid = vid; 1399 l2_lookup.iotag = SJA1105_S_TAG; 1400 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); 1401 if (priv->vlan_state != SJA1105_VLAN_UNAWARE) { 1402 l2_lookup.mask_vlanid = VLAN_VID_MASK; 1403 l2_lookup.mask_iotag = BIT(0); 1404 } else { 1405 l2_lookup.mask_vlanid = 0; 1406 l2_lookup.mask_iotag = 0; 1407 } 1408 l2_lookup.destports = BIT(port); 1409 1410 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1411 SJA1105_SEARCH, &l2_lookup); 1412 if (rc == 0) { 1413 /* Found and this port is already in the entry's 1414 * port mask => job done 1415 */ 1416 if (l2_lookup.destports & BIT(port)) 1417 return 0; 1418 /* l2_lookup.index is populated by the switch in case it 1419 * found something. 1420 */ 1421 l2_lookup.destports |= BIT(port); 1422 goto skip_finding_an_index; 1423 } 1424 1425 /* Not found, so try to find an unused spot in the FDB. 1426 * This is slightly inefficient because the strategy is knock-knock at 1427 * every possible position from 0 to 1023. 
1428 */ 1429 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { 1430 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1431 i, NULL); 1432 if (rc < 0) 1433 break; 1434 } 1435 if (i == SJA1105_MAX_L2_LOOKUP_COUNT) { 1436 dev_err(ds->dev, "FDB is full, cannot add entry.\n"); 1437 return -EINVAL; 1438 } 1439 l2_lookup.lockeds = true; 1440 l2_lookup.index = i; 1441 1442 skip_finding_an_index: 1443 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1444 l2_lookup.index, &l2_lookup, 1445 true); 1446 if (rc < 0) 1447 return rc; 1448 1449 return sja1105_static_fdb_change(priv, port, &l2_lookup, true); 1450 } 1451 1452 int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port, 1453 const unsigned char *addr, u16 vid) 1454 { 1455 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1456 struct sja1105_private *priv = ds->priv; 1457 bool keep; 1458 int rc; 1459 1460 l2_lookup.macaddr = ether_addr_to_u64(addr); 1461 l2_lookup.vlanid = vid; 1462 l2_lookup.iotag = SJA1105_S_TAG; 1463 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); 1464 if (priv->vlan_state != SJA1105_VLAN_UNAWARE) { 1465 l2_lookup.mask_vlanid = VLAN_VID_MASK; 1466 l2_lookup.mask_iotag = BIT(0); 1467 } else { 1468 l2_lookup.mask_vlanid = 0; 1469 l2_lookup.mask_iotag = 0; 1470 } 1471 l2_lookup.destports = BIT(port); 1472 1473 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1474 SJA1105_SEARCH, &l2_lookup); 1475 if (rc < 0) 1476 return 0; 1477 1478 l2_lookup.destports &= ~BIT(port); 1479 1480 /* Decide whether we remove just this port from the FDB entry, 1481 * or if we remove it completely. 
1482 */ 1483 if (l2_lookup.destports) 1484 keep = true; 1485 else 1486 keep = false; 1487 1488 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1489 l2_lookup.index, &l2_lookup, keep); 1490 if (rc < 0) 1491 return rc; 1492 1493 return sja1105_static_fdb_change(priv, port, &l2_lookup, keep); 1494 } 1495 1496 static int sja1105_fdb_add(struct dsa_switch *ds, int port, 1497 const unsigned char *addr, u16 vid) 1498 { 1499 struct sja1105_private *priv = ds->priv; 1500 1501 /* dsa_8021q is in effect when the bridge's vlan_filtering isn't, 1502 * so the switch still does some VLAN processing internally. 1503 * But Shared VLAN Learning (SVL) is also active, and it will take 1504 * care of autonomous forwarding between the unique pvid's of each 1505 * port. Here we just make sure that users can't add duplicate FDB 1506 * entries when in this mode - the actual VID doesn't matter except 1507 * for what gets printed in 'bridge fdb show'. In the case of zero, 1508 * no VID gets printed at all. 
1509 */ 1510 if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL) 1511 vid = 0; 1512 1513 return priv->info->fdb_add_cmd(ds, port, addr, vid); 1514 } 1515 1516 static int sja1105_fdb_del(struct dsa_switch *ds, int port, 1517 const unsigned char *addr, u16 vid) 1518 { 1519 struct sja1105_private *priv = ds->priv; 1520 1521 if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL) 1522 vid = 0; 1523 1524 return priv->info->fdb_del_cmd(ds, port, addr, vid); 1525 } 1526 1527 static int sja1105_fdb_dump(struct dsa_switch *ds, int port, 1528 dsa_fdb_dump_cb_t *cb, void *data) 1529 { 1530 struct sja1105_private *priv = ds->priv; 1531 struct device *dev = ds->dev; 1532 int i; 1533 1534 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { 1535 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1536 u8 macaddr[ETH_ALEN]; 1537 int rc; 1538 1539 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1540 i, &l2_lookup); 1541 /* No fdb entry at i, not an issue */ 1542 if (rc == -ENOENT) 1543 continue; 1544 if (rc) { 1545 dev_err(dev, "Failed to dump FDB: %d\n", rc); 1546 return rc; 1547 } 1548 1549 /* FDB dump callback is per port. This means we have to 1550 * disregard a valid entry if it's not for this port, even if 1551 * only to revisit it later. This is inefficient because the 1552 * 1024-sized FDB table needs to be traversed 4 times through 1553 * SPI during a 'bridge fdb show' command. 1554 */ 1555 if (!(l2_lookup.destports & BIT(port))) 1556 continue; 1557 1558 /* We need to hide the FDB entry for unknown multicast */ 1559 if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST && 1560 l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST) 1561 continue; 1562 1563 u64_to_ether_addr(l2_lookup.macaddr, macaddr); 1564 1565 /* We need to hide the dsa_8021q VLANs from the user. 
*/ 1566 if (priv->vlan_state == SJA1105_VLAN_UNAWARE) 1567 l2_lookup.vlanid = 0; 1568 cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); 1569 } 1570 return 0; 1571 } 1572 1573 static int sja1105_mdb_add(struct dsa_switch *ds, int port, 1574 const struct switchdev_obj_port_mdb *mdb) 1575 { 1576 return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid); 1577 } 1578 1579 static int sja1105_mdb_del(struct dsa_switch *ds, int port, 1580 const struct switchdev_obj_port_mdb *mdb) 1581 { 1582 return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid); 1583 } 1584 1585 /* Common function for unicast and broadcast flood configuration. 1586 * Flooding is configured between each {ingress, egress} port pair, and since 1587 * the bridge's semantics are those of "egress flooding", it means we must 1588 * enable flooding towards this port from all ingress ports that are in the 1589 * same forwarding domain. 1590 */ 1591 static int sja1105_manage_flood_domains(struct sja1105_private *priv) 1592 { 1593 struct sja1105_l2_forwarding_entry *l2_fwd; 1594 struct dsa_switch *ds = priv->ds; 1595 int from, to, rc; 1596 1597 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries; 1598 1599 for (from = 0; from < ds->num_ports; from++) { 1600 u64 fl_domain = 0, bc_domain = 0; 1601 1602 for (to = 0; to < priv->ds->num_ports; to++) { 1603 if (!sja1105_can_forward(l2_fwd, from, to)) 1604 continue; 1605 1606 if (priv->ucast_egress_floods & BIT(to)) 1607 fl_domain |= BIT(to); 1608 if (priv->bcast_egress_floods & BIT(to)) 1609 bc_domain |= BIT(to); 1610 } 1611 1612 /* Nothing changed, nothing to do */ 1613 if (l2_fwd[from].fl_domain == fl_domain && 1614 l2_fwd[from].bc_domain == bc_domain) 1615 continue; 1616 1617 l2_fwd[from].fl_domain = fl_domain; 1618 l2_fwd[from].bc_domain = bc_domain; 1619 1620 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING, 1621 from, &l2_fwd[from], true); 1622 if (rc < 0) 1623 return rc; 1624 } 1625 1626 return 0; 1627 } 1628 1629 static int 
sja1105_bridge_member(struct dsa_switch *ds, int port, 1630 struct net_device *br, bool member) 1631 { 1632 struct sja1105_l2_forwarding_entry *l2_fwd; 1633 struct sja1105_private *priv = ds->priv; 1634 int i, rc; 1635 1636 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries; 1637 1638 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 1639 /* Add this port to the forwarding matrix of the 1640 * other ports in the same bridge, and viceversa. 1641 */ 1642 if (!dsa_is_user_port(ds, i)) 1643 continue; 1644 /* For the ports already under the bridge, only one thing needs 1645 * to be done, and that is to add this port to their 1646 * reachability domain. So we can perform the SPI write for 1647 * them immediately. However, for this port itself (the one 1648 * that is new to the bridge), we need to add all other ports 1649 * to its reachability domain. So we do that incrementally in 1650 * this loop, and perform the SPI write only at the end, once 1651 * the domain contains all other bridge ports. 
1652 */ 1653 if (i == port) 1654 continue; 1655 if (dsa_to_port(ds, i)->bridge_dev != br) 1656 continue; 1657 sja1105_port_allow_traffic(l2_fwd, i, port, member); 1658 sja1105_port_allow_traffic(l2_fwd, port, i, member); 1659 1660 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING, 1661 i, &l2_fwd[i], true); 1662 if (rc < 0) 1663 return rc; 1664 } 1665 1666 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING, 1667 port, &l2_fwd[port], true); 1668 if (rc) 1669 return rc; 1670 1671 return sja1105_manage_flood_domains(priv); 1672 } 1673 1674 static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, 1675 u8 state) 1676 { 1677 struct sja1105_private *priv = ds->priv; 1678 struct sja1105_mac_config_entry *mac; 1679 1680 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 1681 1682 switch (state) { 1683 case BR_STATE_DISABLED: 1684 case BR_STATE_BLOCKING: 1685 /* From UM10944 description of DRPDTAG (why put this there?): 1686 * "Management traffic flows to the port regardless of the state 1687 * of the INGRESS flag". So BPDUs are still be allowed to pass. 1688 * At the moment no difference between DISABLED and BLOCKING. 
1689 */ 1690 mac[port].ingress = false; 1691 mac[port].egress = false; 1692 mac[port].dyn_learn = false; 1693 break; 1694 case BR_STATE_LISTENING: 1695 mac[port].ingress = true; 1696 mac[port].egress = false; 1697 mac[port].dyn_learn = false; 1698 break; 1699 case BR_STATE_LEARNING: 1700 mac[port].ingress = true; 1701 mac[port].egress = false; 1702 mac[port].dyn_learn = !!(priv->learn_ena & BIT(port)); 1703 break; 1704 case BR_STATE_FORWARDING: 1705 mac[port].ingress = true; 1706 mac[port].egress = true; 1707 mac[port].dyn_learn = !!(priv->learn_ena & BIT(port)); 1708 break; 1709 default: 1710 dev_err(ds->dev, "invalid STP state: %d\n", state); 1711 return; 1712 } 1713 1714 sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, 1715 &mac[port], true); 1716 } 1717 1718 static int sja1105_bridge_join(struct dsa_switch *ds, int port, 1719 struct net_device *br) 1720 { 1721 return sja1105_bridge_member(ds, port, br, true); 1722 } 1723 1724 static void sja1105_bridge_leave(struct dsa_switch *ds, int port, 1725 struct net_device *br) 1726 { 1727 sja1105_bridge_member(ds, port, br, false); 1728 } 1729 1730 #define BYTES_PER_KBIT (1000LL / 8) 1731 1732 static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv) 1733 { 1734 int i; 1735 1736 for (i = 0; i < priv->info->num_cbs_shapers; i++) 1737 if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope) 1738 return i; 1739 1740 return -1; 1741 } 1742 1743 static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port, 1744 int prio) 1745 { 1746 int i; 1747 1748 for (i = 0; i < priv->info->num_cbs_shapers; i++) { 1749 struct sja1105_cbs_entry *cbs = &priv->cbs[i]; 1750 1751 if (cbs->port == port && cbs->prio == prio) { 1752 memset(cbs, 0, sizeof(*cbs)); 1753 return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, 1754 i, cbs, true); 1755 } 1756 } 1757 1758 return 0; 1759 } 1760 1761 static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port, 1762 struct tc_cbs_qopt_offload *offload) 1763 { 
1764 struct sja1105_private *priv = ds->priv; 1765 struct sja1105_cbs_entry *cbs; 1766 int index; 1767 1768 if (!offload->enable) 1769 return sja1105_delete_cbs_shaper(priv, port, offload->queue); 1770 1771 index = sja1105_find_unused_cbs_shaper(priv); 1772 if (index < 0) 1773 return -ENOSPC; 1774 1775 cbs = &priv->cbs[index]; 1776 cbs->port = port; 1777 cbs->prio = offload->queue; 1778 /* locredit and sendslope are negative by definition. In hardware, 1779 * positive values must be provided, and the negative sign is implicit. 1780 */ 1781 cbs->credit_hi = offload->hicredit; 1782 cbs->credit_lo = abs(offload->locredit); 1783 /* User space is in kbits/sec, hardware in bytes/sec */ 1784 cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT; 1785 cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT); 1786 /* Convert the negative values from 64-bit 2's complement 1787 * to 32-bit 2's complement (for the case of 0x80000000 whose 1788 * negative is still negative). 1789 */ 1790 cbs->credit_lo &= GENMASK_ULL(31, 0); 1791 cbs->send_slope &= GENMASK_ULL(31, 0); 1792 1793 return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, index, cbs, 1794 true); 1795 } 1796 1797 static int sja1105_reload_cbs(struct sja1105_private *priv) 1798 { 1799 int rc = 0, i; 1800 1801 for (i = 0; i < priv->info->num_cbs_shapers; i++) { 1802 struct sja1105_cbs_entry *cbs = &priv->cbs[i]; 1803 1804 if (!cbs->idle_slope && !cbs->send_slope) 1805 continue; 1806 1807 rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs, 1808 true); 1809 if (rc) 1810 break; 1811 } 1812 1813 return rc; 1814 } 1815 1816 static const char * const sja1105_reset_reasons[] = { 1817 [SJA1105_VLAN_FILTERING] = "VLAN filtering", 1818 [SJA1105_RX_HWTSTAMPING] = "RX timestamping", 1819 [SJA1105_AGEING_TIME] = "Ageing time", 1820 [SJA1105_SCHEDULING] = "Time-aware scheduling", 1821 [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing", 1822 [SJA1105_VIRTUAL_LINKS] = "Virtual links", 1823 }; 1824 1825 /* For situations 
where we need to change a setting at runtime that is only
 * available through the static configuration, resetting the switch in order
 * to upload the new static config is unavoidable. Back up the settings we
 * modify at runtime (currently only MAC) and restore them after uploading,
 * such that this operation is relatively seamless.
 */
int sja1105_static_config_reload(struct sja1105_private *priv,
                                 enum sja1105_reset_reason reason)
{
        struct ptp_system_timestamp ptp_sts_before;
        struct ptp_system_timestamp ptp_sts_after;
        struct sja1105_mac_config_entry *mac;
        int speed_mbps[SJA1105_NUM_PORTS];
        struct dsa_switch *ds = priv->ds;
        s64 t1, t2, t3, t4;
        s64 t12, t34;
        u16 bmcr = 0;
        int rc, i;
        s64 now;

        mutex_lock(&priv->mgmt_lock);

        mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

        /* Back up the dynamic link speed changed by sja1105_adjust_port_config
         * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
         * switch wants to see in the static config in order to allow us to
         * change it through the dynamic interface later.
         */
        for (i = 0; i < SJA1105_NUM_PORTS; i++) {
                speed_mbps[i] = sja1105_speed[mac[i].speed];
                mac[i].speed = SJA1105_SPEED_AUTO;
        }

        /* Back up the SGMII PCS autoneg/speed settings as well */
        if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT))
                bmcr = sja1105_sgmii_read(priv, MII_BMCR);

        /* No PTP operations can run right now */
        mutex_lock(&priv->ptp_data.lock);

        rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
        if (rc < 0)
                goto out_unlock_ptp;

        /* Reset switch and send updated static configuration */
        rc = sja1105_static_config_upload(priv);
        if (rc < 0)
                goto out_unlock_ptp;

        rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
        if (rc < 0)
                goto out_unlock_ptp;

        /* Estimate the PTP clock offset lost across the reset from the system
         * timestamps bracketing the readout and the (re)set of PTPCLKVAL.
         */
        t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
        t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
        t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
        t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
        /* Mid point, corresponds to pre-reset PTPCLKVAL */
        t12 = t1 + (t2 - t1) / 2;
        /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
        t34 = t3 + (t4 - t3) / 2;
        /* Advance PTPCLKVAL by the time it took since its readout */
        now += (t34 - t12);

        __sja1105_ptp_adjtime(ds, now);

out_unlock_ptp:
        mutex_unlock(&priv->ptp_data.lock);

        dev_info(priv->ds->dev,
                 "Reset switch and programmed static config. Reason: %s\n",
                 sja1105_reset_reasons[reason]);

        /* Configure the CGU (PLLs) for MII and RMII PHYs.
         * For these interfaces there is no dynamic configuration
         * needed, since PLLs have same settings at all speeds.
         */
        rc = sja1105_clocking_setup(priv);
        if (rc < 0)
                goto out;

        /* Restore the per-port speeds backed up above */
        for (i = 0; i < SJA1105_NUM_PORTS; i++) {
                rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
                if (rc < 0)
                        goto out;
        }

        /* Restore the SGMII PCS state from the saved BMCR */
        if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) {
                bool an_enabled = !!(bmcr & BMCR_ANENABLE);

                sja1105_sgmii_pcs_config(priv, an_enabled, false);

                if (!an_enabled) {
                        int speed = SPEED_UNKNOWN;

                        if (bmcr & BMCR_SPEED1000)
                                speed = SPEED_1000;
                        else if (bmcr & BMCR_SPEED100)
                                speed = SPEED_100;
                        else
                                speed = SPEED_10;

                        sja1105_sgmii_pcs_force_speed(priv, speed);
                }
        }

        rc = sja1105_reload_cbs(priv);
        if (rc < 0)
                goto out;
out:
        mutex_unlock(&priv->mgmt_lock);

        return rc;
}

/* Program @pvid as the port's default VLAN for untagged ingress traffic */
static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
{
        struct sja1105_mac_config_entry *mac;

        mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

        mac[port].vlanid = pvid;

        return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
                                            &mac[port], true);
}

/* Set up dsa_8021q cross-chip links between our bridged user ports and
 * @other_port on another sja1105 switch in the same tree.
 */
static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
                                         int tree_index, int sw_index,
                                         int other_port, struct net_device *br)
{
        struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
        struct sja1105_private *other_priv = other_ds->priv;
        struct sja1105_private *priv = ds->priv;
        int port, rc;

        /* Only pair up with other sja1105 switches */
        if (other_ds->ops != &sja1105_switch_ops)
                return 0;

        for (port = 0; port < ds->num_ports; port++) {
                if (!dsa_is_user_port(ds, port))
                        continue;
                if (dsa_to_port(ds, port)->bridge_dev != br)
                        continue;

                /* Links are set up in both directions */
                rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx,
                                                     port,
                                                     other_priv->dsa_8021q_ctx,
                                                     other_port);
                if (rc)
                        return rc;

                rc = dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx,
                                                     other_port,
                                                     priv->dsa_8021q_ctx,
                                                     port);
                if (rc)
                        return rc;
        }

        return 0;
}

static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
                                           int tree_index, int sw_index,
                                           int other_port,
                                           struct net_device *br)
{
        struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
        struct sja1105_private *other_priv = other_ds->priv;
        struct sja1105_private *priv = ds->priv;
        int port;

        if (other_ds->ops != &sja1105_switch_ops)
                return;

        for (port = 0; port < ds->num_ports; port++) {
                if (!dsa_is_user_port(ds, port))
                        continue;
                if (dsa_to_port(ds, port)->bridge_dev != br)
                        continue;

                /* Tear down both directions of the cross-chip link */
                dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port,
                                                 other_priv->dsa_8021q_ctx,
                                                 other_port);

                dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx,
                                                 other_port,
                                                 priv->dsa_8021q_ctx, port);
        }
}

/* Enable or disable dsa_8021q tagging for the whole switch */
static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
{
        struct sja1105_private *priv = ds->priv;
        int rc;

        rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled);
        if (rc)
                return rc;

        dev_info(ds->dev, "%s switch tagging\n",
                 enabled ? "Enabled" : "Disabled");
        return 0;
}

static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
                         enum dsa_tag_protocol mp)
{
        return DSA_TAG_PROTO_SJA1105;
}

/* Find a free subvlan slot. Subvlan 0 is reserved for the pvid, so it is
 * returned directly when @pvid is set; returns -1 if the map is full.
 */
static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
{
        int subvlan;

        if (pvid)
                return 0;

        for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
                if (subvlan_map[subvlan] == VLAN_N_VID)
                        return subvlan;

        return -1;
}

/* Return the subvlan slot mapped to @vid, or -1 if not mapped */
static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid)
{
        int subvlan;

        for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
                if (subvlan_map[subvlan] == vid)
                        return subvlan;

        return -1;
}

/* Like sja1105_find_subvlan(), but against the port's committed map */
static int sja1105_find_committed_subvlan(struct sja1105_private *priv,
                                          int port, u16 vid)
{
        struct sja1105_port *sp = &priv->ports[port];

        return sja1105_find_subvlan(sp->subvlan_map, vid);
}

/* Mark all subvlan slots unused (VLAN_N_VID acts as the "empty" sentinel) */
static void sja1105_init_subvlan_map(u16 *subvlan_map)
{
        int subvlan;

        for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
                subvlan_map[subvlan] = VLAN_N_VID;
}

/* Copy a working subvlan map into the port's committed state */
static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port,
                                       u16 *subvlan_map)
{
        struct sja1105_port *sp = &priv->ports[port];
        int subvlan;

        for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
                sp->subvlan_map[subvlan] = subvlan_map[subvlan];
}

/* Return the index of @vid in the static VLAN Lookup table, or -1 if absent */
static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
{
        struct sja1105_vlan_lookup_entry *vlan;
        int count, i;

        vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
        count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;

        for (i = 0; i < count; i++)
                if (vlan[i].vlanid == vid)
                        return i;

        /* Return an invalid entry index if not found */
        return -1;
}

/* Search @retagging for an exact {ingress port, ingress VID, egress VID}
 * rule; returns its index or -1.
 */
static int
sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging,
                             int count, int from_port, u16 from_vid,
                             u16 to_vid)
{
        int i;

        for (i = 0; i < count; i++)
                if (retagging[i].ing_port == BIT(from_port) &&
                    retagging[i].vlan_ing == from_vid &&
                    retagging[i].vlan_egr == to_vid)
                        return i;

        /* Return an invalid entry index if not found */
        return -1;
}

static int sja1105_commit_vlans(struct sja1105_private *priv,
                                struct sja1105_vlan_lookup_entry *new_vlan,
                                struct sja1105_retagging_entry *new_retagging,
                                int num_retagging)
{
        struct sja1105_retagging_entry *retagging;
        struct sja1105_vlan_lookup_entry *vlan;
        struct sja1105_table *table;
        int num_vlans = 0;
        int rc, i, k = 0;

        /* VLAN table */
        table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
        vlan = table->entries;

        /* Diff the desired VLAN set (new_vlan, indexed by VID, with
         * VLAN_N_VID meaning "absent") against the committed static config.
         */
        for (i = 0; i < VLAN_N_VID; i++) {
                int match = sja1105_is_vlan_configured(priv, i);

                if (new_vlan[i].vlanid != VLAN_N_VID)
                        num_vlans++;

                if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) {
                        /* Was there before, no longer is.
Delete */ 2145 dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i); 2146 rc = sja1105_dynamic_config_write(priv, 2147 BLK_IDX_VLAN_LOOKUP, 2148 i, &vlan[match], false); 2149 if (rc < 0) 2150 return rc; 2151 } else if (new_vlan[i].vlanid != VLAN_N_VID) { 2152 /* Nothing changed, don't do anything */ 2153 if (match >= 0 && 2154 vlan[match].vlanid == new_vlan[i].vlanid && 2155 vlan[match].tag_port == new_vlan[i].tag_port && 2156 vlan[match].vlan_bc == new_vlan[i].vlan_bc && 2157 vlan[match].vmemb_port == new_vlan[i].vmemb_port) 2158 continue; 2159 /* Update entry */ 2160 dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i); 2161 rc = sja1105_dynamic_config_write(priv, 2162 BLK_IDX_VLAN_LOOKUP, 2163 i, &new_vlan[i], 2164 true); 2165 if (rc < 0) 2166 return rc; 2167 } 2168 } 2169 2170 if (table->entry_count) 2171 kfree(table->entries); 2172 2173 table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size, 2174 GFP_KERNEL); 2175 if (!table->entries) 2176 return -ENOMEM; 2177 2178 table->entry_count = num_vlans; 2179 vlan = table->entries; 2180 2181 for (i = 0; i < VLAN_N_VID; i++) { 2182 if (new_vlan[i].vlanid == VLAN_N_VID) 2183 continue; 2184 vlan[k++] = new_vlan[i]; 2185 } 2186 2187 /* VLAN Retagging Table */ 2188 table = &priv->static_config.tables[BLK_IDX_RETAGGING]; 2189 retagging = table->entries; 2190 2191 for (i = 0; i < table->entry_count; i++) { 2192 rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING, 2193 i, &retagging[i], false); 2194 if (rc) 2195 return rc; 2196 } 2197 2198 if (table->entry_count) 2199 kfree(table->entries); 2200 2201 table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size, 2202 GFP_KERNEL); 2203 if (!table->entries) 2204 return -ENOMEM; 2205 2206 table->entry_count = num_retagging; 2207 retagging = table->entries; 2208 2209 for (i = 0; i < num_retagging; i++) { 2210 retagging[i] = new_retagging[i]; 2211 2212 /* Update entry */ 2213 rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING, 2214 i, &retagging[i], 
true); 2215 if (rc < 0) 2216 return rc; 2217 } 2218 2219 return 0; 2220 } 2221 2222 struct sja1105_crosschip_vlan { 2223 struct list_head list; 2224 u16 vid; 2225 bool untagged; 2226 int port; 2227 int other_port; 2228 struct dsa_8021q_context *other_ctx; 2229 }; 2230 2231 struct sja1105_crosschip_switch { 2232 struct list_head list; 2233 struct dsa_8021q_context *other_ctx; 2234 }; 2235 2236 static int sja1105_commit_pvid(struct sja1105_private *priv) 2237 { 2238 struct sja1105_bridge_vlan *v; 2239 struct list_head *vlan_list; 2240 int rc = 0; 2241 2242 if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) 2243 vlan_list = &priv->bridge_vlans; 2244 else 2245 vlan_list = &priv->dsa_8021q_vlans; 2246 2247 list_for_each_entry(v, vlan_list, list) { 2248 if (v->pvid) { 2249 rc = sja1105_pvid_apply(priv, v->port, v->vid); 2250 if (rc) 2251 break; 2252 } 2253 } 2254 2255 return rc; 2256 } 2257 2258 static int 2259 sja1105_build_bridge_vlans(struct sja1105_private *priv, 2260 struct sja1105_vlan_lookup_entry *new_vlan) 2261 { 2262 struct sja1105_bridge_vlan *v; 2263 2264 if (priv->vlan_state == SJA1105_VLAN_UNAWARE) 2265 return 0; 2266 2267 list_for_each_entry(v, &priv->bridge_vlans, list) { 2268 int match = v->vid; 2269 2270 new_vlan[match].vlanid = v->vid; 2271 new_vlan[match].vmemb_port |= BIT(v->port); 2272 new_vlan[match].vlan_bc |= BIT(v->port); 2273 if (!v->untagged) 2274 new_vlan[match].tag_port |= BIT(v->port); 2275 } 2276 2277 return 0; 2278 } 2279 2280 static int 2281 sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv, 2282 struct sja1105_vlan_lookup_entry *new_vlan) 2283 { 2284 struct sja1105_bridge_vlan *v; 2285 2286 if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) 2287 return 0; 2288 2289 list_for_each_entry(v, &priv->dsa_8021q_vlans, list) { 2290 int match = v->vid; 2291 2292 new_vlan[match].vlanid = v->vid; 2293 new_vlan[match].vmemb_port |= BIT(v->port); 2294 new_vlan[match].vlan_bc |= BIT(v->port); 2295 if (!v->untagged) 2296 
new_vlan[match].tag_port |= BIT(v->port); 2297 } 2298 2299 return 0; 2300 } 2301 2302 static int sja1105_build_subvlans(struct sja1105_private *priv, 2303 u16 subvlan_map[][DSA_8021Q_N_SUBVLAN], 2304 struct sja1105_vlan_lookup_entry *new_vlan, 2305 struct sja1105_retagging_entry *new_retagging, 2306 int *num_retagging) 2307 { 2308 struct sja1105_bridge_vlan *v; 2309 int k = *num_retagging; 2310 2311 if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT) 2312 return 0; 2313 2314 list_for_each_entry(v, &priv->bridge_vlans, list) { 2315 int upstream = dsa_upstream_port(priv->ds, v->port); 2316 int match, subvlan; 2317 u16 rx_vid; 2318 2319 /* Only sub-VLANs on user ports need to be applied. 2320 * Bridge VLANs also include VLANs added automatically 2321 * by DSA on the CPU port. 2322 */ 2323 if (!dsa_is_user_port(priv->ds, v->port)) 2324 continue; 2325 2326 subvlan = sja1105_find_subvlan(subvlan_map[v->port], 2327 v->vid); 2328 if (subvlan < 0) { 2329 subvlan = sja1105_find_free_subvlan(subvlan_map[v->port], 2330 v->pvid); 2331 if (subvlan < 0) { 2332 dev_err(priv->ds->dev, "No more free subvlans\n"); 2333 return -ENOSPC; 2334 } 2335 } 2336 2337 rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan); 2338 2339 /* @v->vid on @v->port needs to be retagged to @rx_vid 2340 * on @upstream. Assume @v->vid on @v->port and on 2341 * @upstream was already configured by the previous 2342 * iteration over bridge_vlans. 
2343 */ 2344 match = rx_vid; 2345 new_vlan[match].vlanid = rx_vid; 2346 new_vlan[match].vmemb_port |= BIT(v->port); 2347 new_vlan[match].vmemb_port |= BIT(upstream); 2348 new_vlan[match].vlan_bc |= BIT(v->port); 2349 new_vlan[match].vlan_bc |= BIT(upstream); 2350 /* The "untagged" flag is set the same as for the 2351 * original VLAN 2352 */ 2353 if (!v->untagged) 2354 new_vlan[match].tag_port |= BIT(v->port); 2355 /* But it's always tagged towards the CPU */ 2356 new_vlan[match].tag_port |= BIT(upstream); 2357 2358 /* The Retagging Table generates packet *clones* with 2359 * the new VLAN. This is a very odd hardware quirk 2360 * which we need to suppress by dropping the original 2361 * packet. 2362 * Deny egress of the original VLAN towards the CPU 2363 * port. This will force the switch to drop it, and 2364 * we'll see only the retagged packets. 2365 */ 2366 match = v->vid; 2367 new_vlan[match].vlan_bc &= ~BIT(upstream); 2368 2369 /* And the retagging itself */ 2370 new_retagging[k].vlan_ing = v->vid; 2371 new_retagging[k].vlan_egr = rx_vid; 2372 new_retagging[k].ing_port = BIT(v->port); 2373 new_retagging[k].egr_port = BIT(upstream); 2374 if (k++ == SJA1105_MAX_RETAGGING_COUNT) { 2375 dev_err(priv->ds->dev, "No more retagging rules\n"); 2376 return -ENOSPC; 2377 } 2378 2379 subvlan_map[v->port][subvlan] = v->vid; 2380 } 2381 2382 *num_retagging = k; 2383 2384 return 0; 2385 } 2386 2387 /* Sadly, in crosschip scenarios where the CPU port is also the link to another 2388 * switch, we should retag backwards (the dsa_8021q vid to the original vid) on 2389 * the CPU port of neighbour switches. 
2390 */ 2391 static int 2392 sja1105_build_crosschip_subvlans(struct sja1105_private *priv, 2393 struct sja1105_vlan_lookup_entry *new_vlan, 2394 struct sja1105_retagging_entry *new_retagging, 2395 int *num_retagging) 2396 { 2397 struct sja1105_crosschip_vlan *tmp, *pos; 2398 struct dsa_8021q_crosschip_link *c; 2399 struct sja1105_bridge_vlan *v, *w; 2400 struct list_head crosschip_vlans; 2401 int k = *num_retagging; 2402 int rc = 0; 2403 2404 if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT) 2405 return 0; 2406 2407 INIT_LIST_HEAD(&crosschip_vlans); 2408 2409 list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) { 2410 struct sja1105_private *other_priv = c->other_ctx->ds->priv; 2411 2412 if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) 2413 continue; 2414 2415 /* Crosschip links are also added to the CPU ports. 2416 * Ignore those. 2417 */ 2418 if (!dsa_is_user_port(priv->ds, c->port)) 2419 continue; 2420 if (!dsa_is_user_port(c->other_ctx->ds, c->other_port)) 2421 continue; 2422 2423 /* Search for VLANs on the remote port */ 2424 list_for_each_entry(v, &other_priv->bridge_vlans, list) { 2425 bool already_added = false; 2426 bool we_have_it = false; 2427 2428 if (v->port != c->other_port) 2429 continue; 2430 2431 /* If @v is a pvid on @other_ds, it does not need 2432 * re-retagging, because its SVL field is 0 and we 2433 * already allow that, via the dsa_8021q crosschip 2434 * links. 
2435 */ 2436 if (v->pvid) 2437 continue; 2438 2439 /* Search for the VLAN on our local port */ 2440 list_for_each_entry(w, &priv->bridge_vlans, list) { 2441 if (w->port == c->port && w->vid == v->vid) { 2442 we_have_it = true; 2443 break; 2444 } 2445 } 2446 2447 if (!we_have_it) 2448 continue; 2449 2450 list_for_each_entry(tmp, &crosschip_vlans, list) { 2451 if (tmp->vid == v->vid && 2452 tmp->untagged == v->untagged && 2453 tmp->port == c->port && 2454 tmp->other_port == v->port && 2455 tmp->other_ctx == c->other_ctx) { 2456 already_added = true; 2457 break; 2458 } 2459 } 2460 2461 if (already_added) 2462 continue; 2463 2464 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 2465 if (!tmp) { 2466 dev_err(priv->ds->dev, "Failed to allocate memory\n"); 2467 rc = -ENOMEM; 2468 goto out; 2469 } 2470 tmp->vid = v->vid; 2471 tmp->port = c->port; 2472 tmp->other_port = v->port; 2473 tmp->other_ctx = c->other_ctx; 2474 tmp->untagged = v->untagged; 2475 list_add(&tmp->list, &crosschip_vlans); 2476 } 2477 } 2478 2479 list_for_each_entry(tmp, &crosschip_vlans, list) { 2480 struct sja1105_private *other_priv = tmp->other_ctx->ds->priv; 2481 int upstream = dsa_upstream_port(priv->ds, tmp->port); 2482 int match, subvlan; 2483 u16 rx_vid; 2484 2485 subvlan = sja1105_find_committed_subvlan(other_priv, 2486 tmp->other_port, 2487 tmp->vid); 2488 /* If this happens, it's a bug. The neighbour switch does not 2489 * have a subvlan for tmp->vid on tmp->other_port, but it 2490 * should, since we already checked for its vlan_state. 2491 */ 2492 if (WARN_ON(subvlan < 0)) { 2493 rc = -EINVAL; 2494 goto out; 2495 } 2496 2497 rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds, 2498 tmp->other_port, 2499 subvlan); 2500 2501 /* The @rx_vid retagged from @tmp->vid on 2502 * {@tmp->other_ds, @tmp->other_port} needs to be 2503 * re-retagged to @tmp->vid on the way back to us. 
2504 * 2505 * Assume the original @tmp->vid is already configured 2506 * on this local switch, otherwise we wouldn't be 2507 * retagging its subvlan on the other switch in the 2508 * first place. We just need to add a reverse retagging 2509 * rule for @rx_vid and install @rx_vid on our ports. 2510 */ 2511 match = rx_vid; 2512 new_vlan[match].vlanid = rx_vid; 2513 new_vlan[match].vmemb_port |= BIT(tmp->port); 2514 new_vlan[match].vmemb_port |= BIT(upstream); 2515 /* The "untagged" flag is set the same as for the 2516 * original VLAN. And towards the CPU, it doesn't 2517 * really matter, because @rx_vid will only receive 2518 * traffic on that port. For consistency with other dsa_8021q 2519 * VLANs, we'll keep the CPU port tagged. 2520 */ 2521 if (!tmp->untagged) 2522 new_vlan[match].tag_port |= BIT(tmp->port); 2523 new_vlan[match].tag_port |= BIT(upstream); 2524 /* Deny egress of @rx_vid towards our front-panel port. 2525 * This will force the switch to drop it, and we'll see 2526 * only the re-retagged packets (having the original, 2527 * pre-initial-retagging, VLAN @tmp->vid). 2528 */ 2529 new_vlan[match].vlan_bc &= ~BIT(tmp->port); 2530 2531 /* On reverse retagging, the same ingress VLAN goes to multiple 2532 * ports. So we have an opportunity to create composite rules 2533 * to not waste the limited space in the retagging table. 
2534 */ 2535 k = sja1105_find_retagging_entry(new_retagging, *num_retagging, 2536 upstream, rx_vid, tmp->vid); 2537 if (k < 0) { 2538 if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) { 2539 dev_err(priv->ds->dev, "No more retagging rules\n"); 2540 rc = -ENOSPC; 2541 goto out; 2542 } 2543 k = (*num_retagging)++; 2544 } 2545 /* And the retagging itself */ 2546 new_retagging[k].vlan_ing = rx_vid; 2547 new_retagging[k].vlan_egr = tmp->vid; 2548 new_retagging[k].ing_port = BIT(upstream); 2549 new_retagging[k].egr_port |= BIT(tmp->port); 2550 } 2551 2552 out: 2553 list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) { 2554 list_del(&tmp->list); 2555 kfree(tmp); 2556 } 2557 2558 return rc; 2559 } 2560 2561 static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify); 2562 2563 static int sja1105_notify_crosschip_switches(struct sja1105_private *priv) 2564 { 2565 struct sja1105_crosschip_switch *s, *pos; 2566 struct list_head crosschip_switches; 2567 struct dsa_8021q_crosschip_link *c; 2568 int rc = 0; 2569 2570 INIT_LIST_HEAD(&crosschip_switches); 2571 2572 list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) { 2573 bool already_added = false; 2574 2575 list_for_each_entry(s, &crosschip_switches, list) { 2576 if (s->other_ctx == c->other_ctx) { 2577 already_added = true; 2578 break; 2579 } 2580 } 2581 2582 if (already_added) 2583 continue; 2584 2585 s = kzalloc(sizeof(*s), GFP_KERNEL); 2586 if (!s) { 2587 dev_err(priv->ds->dev, "Failed to allocate memory\n"); 2588 rc = -ENOMEM; 2589 goto out; 2590 } 2591 s->other_ctx = c->other_ctx; 2592 list_add(&s->list, &crosschip_switches); 2593 } 2594 2595 list_for_each_entry(s, &crosschip_switches, list) { 2596 struct sja1105_private *other_priv = s->other_ctx->ds->priv; 2597 2598 rc = sja1105_build_vlan_table(other_priv, false); 2599 if (rc) 2600 goto out; 2601 } 2602 2603 out: 2604 list_for_each_entry_safe(s, pos, &crosschip_switches, list) { 2605 list_del(&s->list); 2606 kfree(s); 2607 } 
2608 2609 return rc; 2610 } 2611 2612 static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify) 2613 { 2614 u16 subvlan_map[SJA1105_NUM_PORTS][DSA_8021Q_N_SUBVLAN]; 2615 struct sja1105_retagging_entry *new_retagging; 2616 struct sja1105_vlan_lookup_entry *new_vlan; 2617 struct sja1105_table *table; 2618 int i, num_retagging = 0; 2619 int rc; 2620 2621 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; 2622 new_vlan = kcalloc(VLAN_N_VID, 2623 table->ops->unpacked_entry_size, GFP_KERNEL); 2624 if (!new_vlan) 2625 return -ENOMEM; 2626 2627 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; 2628 new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT, 2629 table->ops->unpacked_entry_size, GFP_KERNEL); 2630 if (!new_retagging) { 2631 kfree(new_vlan); 2632 return -ENOMEM; 2633 } 2634 2635 for (i = 0; i < VLAN_N_VID; i++) 2636 new_vlan[i].vlanid = VLAN_N_VID; 2637 2638 for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++) 2639 new_retagging[i].vlan_ing = VLAN_N_VID; 2640 2641 for (i = 0; i < priv->ds->num_ports; i++) 2642 sja1105_init_subvlan_map(subvlan_map[i]); 2643 2644 /* Bridge VLANs */ 2645 rc = sja1105_build_bridge_vlans(priv, new_vlan); 2646 if (rc) 2647 goto out; 2648 2649 /* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c: 2650 * - RX VLANs 2651 * - TX VLANs 2652 * - Crosschip links 2653 */ 2654 rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan); 2655 if (rc) 2656 goto out; 2657 2658 /* Private VLANs necessary for dsa_8021q operation, which we need to 2659 * determine on our own: 2660 * - Sub-VLANs 2661 * - Sub-VLANs of crosschip switches 2662 */ 2663 rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging, 2664 &num_retagging); 2665 if (rc) 2666 goto out; 2667 2668 rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging, 2669 &num_retagging); 2670 if (rc) 2671 goto out; 2672 2673 rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging); 2674 if (rc) 2675 goto out; 2676 2677 
rc = sja1105_commit_pvid(priv); 2678 if (rc) 2679 goto out; 2680 2681 for (i = 0; i < priv->ds->num_ports; i++) 2682 sja1105_commit_subvlan_map(priv, i, subvlan_map[i]); 2683 2684 if (notify) { 2685 rc = sja1105_notify_crosschip_switches(priv); 2686 if (rc) 2687 goto out; 2688 } 2689 2690 out: 2691 kfree(new_vlan); 2692 kfree(new_retagging); 2693 2694 return rc; 2695 } 2696 2697 /* The TPID setting belongs to the General Parameters table, 2698 * which can only be partially reconfigured at runtime (and not the TPID). 2699 * So a switch reset is required. 2700 */ 2701 int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, 2702 struct netlink_ext_ack *extack) 2703 { 2704 struct sja1105_l2_lookup_params_entry *l2_lookup_params; 2705 struct sja1105_general_params_entry *general_params; 2706 struct sja1105_private *priv = ds->priv; 2707 enum sja1105_vlan_state state; 2708 struct sja1105_table *table; 2709 struct sja1105_rule *rule; 2710 bool want_tagging; 2711 u16 tpid, tpid2; 2712 int rc; 2713 2714 list_for_each_entry(rule, &priv->flow_block.rules, list) { 2715 if (rule->type == SJA1105_RULE_VL) { 2716 NL_SET_ERR_MSG_MOD(extack, 2717 "Cannot change VLAN filtering with active VL rules"); 2718 return -EBUSY; 2719 } 2720 } 2721 2722 if (enabled) { 2723 /* Enable VLAN filtering. */ 2724 tpid = ETH_P_8021Q; 2725 tpid2 = ETH_P_8021AD; 2726 } else { 2727 /* Disable VLAN filtering. 
		 */
		tpid = ETH_P_SJA1105;
		tpid2 = ETH_P_SJA1105;
	}

	/* VLAN filtering is global on sja1105, so configure the xmit TPID
	 * of every port, not just @port.
	 */
	for (port = 0; port < ds->num_ports; port++) {
		struct sja1105_port *sp = &priv->ports[port];

		if (enabled)
			sp->xmit_tpid = priv->info->qinq_tpid;
		else
			sp->xmit_tpid = ETH_P_SJA1105;
	}

	if (!enabled)
		state = SJA1105_VLAN_UNAWARE;
	else if (priv->best_effort_vlan_filtering)
		state = SJA1105_VLAN_BEST_EFFORT;
	else
		state = SJA1105_VLAN_FILTERING_FULL;

	if (priv->vlan_state == state)
		return 0;

	priv->vlan_state = state;
	want_tagging = (state == SJA1105_VLAN_UNAWARE ||
			state == SJA1105_VLAN_BEST_EFFORT);

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
	general_params = table->entries;
	/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
	general_params->tpid = tpid;
	/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
	general_params->tpid2 = tpid2;
	/* When VLAN filtering is on, we need to at least be able to
	 * decode management traffic through the "backup plan".
	 */
	general_params->incl_srcpt1 = enabled;
	general_params->incl_srcpt0 = enabled;

	want_tagging = priv->best_effort_vlan_filtering || !enabled;

	/* VLAN filtering => independent VLAN learning.
	 * No VLAN filtering (or best effort) => shared VLAN learning.
	 *
	 * In shared VLAN learning mode, untagged traffic still gets
	 * pvid-tagged, and the FDB table gets populated with entries
	 * containing the "real" (pvid or from VLAN tag) VLAN ID.
	 * However the switch performs a masked L2 lookup in the FDB,
	 * effectively only looking up a frame's DMAC (and not VID) for the
	 * forwarding decision.
	 *
	 * This is extremely convenient for us, because in modes with
	 * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
	 * each front panel port. This is good for identification but breaks
	 * learning badly - the VID of the learnt FDB entry is unique, aka
	 * no frames coming from any other port are going to have it. So
	 * for forwarding purposes, this is as though learning was broken
	 * (all frames get flooded).
	 */
	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
	l2_lookup_params = table->entries;
	l2_lookup_params->shared_learn = want_tagging;

	sja1105_frame_memory_partitioning(priv);

	rc = sja1105_build_vlan_table(priv, false);
	if (rc)
		return rc;

	/* The new TPIDs can only take effect through a switch reset */
	rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
	if (rc)
		NL_SET_ERR_MSG_MOD(extack, "Failed to change VLAN Ethertype");

	/* Switch port identification based on 802.1Q is only passable
	 * if we are not under a vlan_filtering bridge. So make sure
	 * the two configurations are mutually exclusive (of course, the
	 * user may know better, i.e. best_effort_vlan_filtering).
	 */
	return sja1105_setup_8021q_tagging(ds, want_tagging);
}

/* Returns number of VLANs added (0 or 1) on success,
 * or a negative error code.
 */
static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
				u16 flags, struct list_head *vlan_list)
{
	bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
	struct sja1105_bridge_vlan *v;

	list_for_each_entry(v, vlan_list, list) {
		if (v->port == port && v->vid == vid) {
			/* Already added */
			if (v->untagged == untagged && v->pvid == pvid)
				/* Nothing changed */
				return 0;

			/* It's the same VLAN, but some of the flags changed
			 * and the user did not bother to delete it first.
			 * Update it and trigger sja1105_build_vlan_table.
2829 */ 2830 v->untagged = untagged; 2831 v->pvid = pvid; 2832 return 1; 2833 } 2834 } 2835 2836 v = kzalloc(sizeof(*v), GFP_KERNEL); 2837 if (!v) { 2838 dev_err(ds->dev, "Out of memory while storing VLAN\n"); 2839 return -ENOMEM; 2840 } 2841 2842 v->port = port; 2843 v->vid = vid; 2844 v->untagged = untagged; 2845 v->pvid = pvid; 2846 list_add(&v->list, vlan_list); 2847 2848 return 1; 2849 } 2850 2851 /* Returns number of VLANs deleted (0 or 1) */ 2852 static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid, 2853 struct list_head *vlan_list) 2854 { 2855 struct sja1105_bridge_vlan *v, *n; 2856 2857 list_for_each_entry_safe(v, n, vlan_list, list) { 2858 if (v->port == port && v->vid == vid) { 2859 list_del(&v->list); 2860 kfree(v); 2861 return 1; 2862 } 2863 } 2864 2865 return 0; 2866 } 2867 2868 static int sja1105_vlan_add(struct dsa_switch *ds, int port, 2869 const struct switchdev_obj_port_vlan *vlan, 2870 struct netlink_ext_ack *extack) 2871 { 2872 struct sja1105_private *priv = ds->priv; 2873 bool vlan_table_changed = false; 2874 int rc; 2875 2876 /* If the user wants best-effort VLAN filtering (aka vlan_filtering 2877 * bridge plus tagging), be sure to at least deny alterations to the 2878 * configuration done by dsa_8021q. 
2879 */ 2880 if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL && 2881 vid_is_dsa_8021q(vlan->vid)) { 2882 NL_SET_ERR_MSG_MOD(extack, 2883 "Range 1024-3071 reserved for dsa_8021q operation"); 2884 return -EBUSY; 2885 } 2886 2887 rc = sja1105_vlan_add_one(ds, port, vlan->vid, vlan->flags, 2888 &priv->bridge_vlans); 2889 if (rc < 0) 2890 return rc; 2891 if (rc > 0) 2892 vlan_table_changed = true; 2893 2894 if (!vlan_table_changed) 2895 return 0; 2896 2897 return sja1105_build_vlan_table(priv, true); 2898 } 2899 2900 static int sja1105_vlan_del(struct dsa_switch *ds, int port, 2901 const struct switchdev_obj_port_vlan *vlan) 2902 { 2903 struct sja1105_private *priv = ds->priv; 2904 bool vlan_table_changed = false; 2905 int rc; 2906 2907 rc = sja1105_vlan_del_one(ds, port, vlan->vid, &priv->bridge_vlans); 2908 if (rc > 0) 2909 vlan_table_changed = true; 2910 2911 if (!vlan_table_changed) 2912 return 0; 2913 2914 return sja1105_build_vlan_table(priv, true); 2915 } 2916 2917 static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, 2918 u16 flags) 2919 { 2920 struct sja1105_private *priv = ds->priv; 2921 int rc; 2922 2923 rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans); 2924 if (rc <= 0) 2925 return rc; 2926 2927 return sja1105_build_vlan_table(priv, true); 2928 } 2929 2930 static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) 2931 { 2932 struct sja1105_private *priv = ds->priv; 2933 int rc; 2934 2935 rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans); 2936 if (!rc) 2937 return 0; 2938 2939 return sja1105_build_vlan_table(priv, true); 2940 } 2941 2942 static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = { 2943 .vlan_add = sja1105_dsa_8021q_vlan_add, 2944 .vlan_del = sja1105_dsa_8021q_vlan_del, 2945 }; 2946 2947 /* The programming model for the SJA1105 switch is "all-at-once" via static 2948 * configuration tables. 
 * Some of these can be dynamically modified at runtime,
 * but not the xMII mode parameters table.
 * Furthermore, some PHYs may not have crystals for generating their clocks
 * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
 * ref_clk pin. So port clocking needs to be initialized early, before
 * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
 * Setting correct PHY link speed does not matter now.
 * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
 * bindings are not yet parsed by DSA core. We need to parse early so that we
 * can populate the xMII mode parameters table.
 */
static int sja1105_setup(struct dsa_switch *ds)
{
	struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
	struct sja1105_private *priv = ds->priv;
	int rc;

	rc = sja1105_parse_dt(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
		return rc;
	}

	/* Error out early if internal delays are required through DT
	 * and we can't apply them.
	 */
	rc = sja1105_parse_rgmii_delays(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "RGMII delay not supported\n");
		return rc;
	}

	rc = sja1105_ptp_clock_register(ds);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
		return rc;
	}
	/* Create and send configuration down to device */
	rc = sja1105_static_config_load(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to load static config: %d\n", rc);
		goto out_ptp_clock_unregister;
	}
	/* Configure the CGU (PHY link modes and speeds) */
	rc = sja1105_clocking_setup(priv);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
		goto out_static_config_free;
	}
	/* On SJA1105, VLAN filtering per se is always enabled in hardware.
	 * The only thing we can do to disable it is lie about what the 802.1Q
	 * EtherType is.
	 * So it will still try to apply VLAN filtering, but all ingress
	 * traffic (except frames received with EtherType of ETH_P_SJA1105)
	 * will be internally tagged with a distorted VLAN header where the
	 * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
	 */
	ds->vlan_filtering_is_global = true;

	/* Advertise the 8 egress queues */
	ds->num_tx_queues = SJA1105_NUM_TC;

	ds->mtu_enforcement_ingress = true;

	priv->best_effort_vlan_filtering = true;

	rc = sja1105_devlink_setup(ds);
	if (rc < 0)
		goto out_static_config_free;

	/* The DSA/switchdev model brings up switch ports in standalone mode by
	 * default, and that means vlan_filtering is 0 since they're not under
	 * a bridge, so it's safe to set up switch tagging at this time.
	 */
	rtnl_lock();
	rc = sja1105_setup_8021q_tagging(ds, true);
	rtnl_unlock();
	if (rc)
		goto out_devlink_teardown;

	return 0;

out_devlink_teardown:
	sja1105_devlink_teardown(ds);
out_ptp_clock_unregister:
	sja1105_ptp_clock_unregister(ds);
out_static_config_free:
	sja1105_static_config_free(&priv->static_config);

	return rc;
}

/* Undo sja1105_setup: stop per-port xmit workers, tear down the subsystems
 * in reverse order of setup, and drop all remembered VLANs.
 */
static void sja1105_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_bridge_vlan *v, *n;
	int port;

	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		struct sja1105_port *sp = &priv->ports[port];

		if (!dsa_is_user_port(ds, port))
			continue;

		if (sp->xmit_worker)
			kthread_destroy_worker(sp->xmit_worker);
	}

	sja1105_devlink_teardown(ds);
	sja1105_flower_teardown(ds);
	sja1105_tas_teardown(ds);
	sja1105_ptp_clock_unregister(ds);
	sja1105_static_config_free(&priv->static_config);

	list_for_each_entry_safe(v, n,
				 &priv->dsa_8021q_vlans, list) {
		list_del(&v->list);
		kfree(v);
	}

	list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) {
		list_del(&v->list);
		kfree(v);
	}
}

/* Stop deferred transmission on a user port: cancel in-flight work and
 * drop any skbs still queued for it.
 */
static void sja1105_port_disable(struct dsa_switch *ds, int port)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_port *sp = &priv->ports[port];

	if (!dsa_is_user_port(ds, port))
		return;

	kthread_cancel_work_sync(&sp->xmit_work);
	skb_queue_purge(&sp->xmit_queue);
}

/* Transmit one management frame (by DMAC) through @port using management
 * route @slot: install the route, hand the skb to the master, then poll
 * the route's ENFPORT flag until the switch consumed it (or timeout).
 * @takets requests a TX timestamp for the frame.
 */
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
			     struct sk_buff *skb, bool takets)
{
	struct sja1105_mgmt_entry mgmt_route = {0};
	struct sja1105_private *priv = ds->priv;
	struct ethhdr *hdr;
	int timeout = 10;
	int rc;

	hdr = eth_hdr(skb);

	mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
	mgmt_route.destports = BIT(port);
	mgmt_route.enfport = 1;
	mgmt_route.tsreg = 0;
	mgmt_route.takets = takets;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					  slot, &mgmt_route, true);
	if (rc < 0) {
		kfree_skb(skb);
		return rc;
	}

	/* Transfer skb to the host port. */
	dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);

	/* Wait until the switch has processed the frame */
	do {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
						 slot, &mgmt_route);
		if (rc < 0) {
			dev_err_ratelimited(priv->ds->dev,
					    "failed to poll for mgmt route\n");
			continue;
		}

		/* UM10944: The ENFPORT flag of the respective entry is
		 * cleared when a match is found. The host can use this
		 * flag as an acknowledgment.
		 */
		cpu_relax();
	} while (mgmt_route.enfport && --timeout);

	if (!timeout) {
		/* Clean up the management route so that a follow-up
		 * frame may not match on it by mistake.
		 * This is only hardware supported on P/Q/R/S - on E/T it is
		 * a no-op and we are silently discarding the -EOPNOTSUPP.
		 */
		sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					     slot, &mgmt_route, false);
		dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
	}

	return NETDEV_TX_OK;
}

#define work_to_port(work) \
		container_of((work), struct sja1105_port, xmit_work)
#define tagger_to_sja1105(t) \
		container_of((t), struct sja1105_private, tagger_data)

/* Deferred work is unfortunately necessary because setting up the management
 * route cannot be done from atomic context (SPI transfer takes a sleepable
 * lock on the bus)
 */
static void sja1105_port_deferred_xmit(struct kthread_work *work)
{
	struct sja1105_port *sp = work_to_port(work);
	struct sja1105_tagger_data *tagger_data = sp->data;
	struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
	int port = sp - priv->ports;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
		struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;

		/* Serialize against other users of the management slots */
		mutex_lock(&priv->mgmt_lock);

		sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);

		/* The clone, if there, was made by dsa_skb_tx_timestamp */
		if (clone)
			sja1105_ptp_txtstamp_skb(priv->ds, port, clone);

		mutex_unlock(&priv->mgmt_lock);
	}
}

/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
 * which cannot be reconfigured at runtime. So a switch reset is required.
3177 */ 3178 static int sja1105_set_ageing_time(struct dsa_switch *ds, 3179 unsigned int ageing_time) 3180 { 3181 struct sja1105_l2_lookup_params_entry *l2_lookup_params; 3182 struct sja1105_private *priv = ds->priv; 3183 struct sja1105_table *table; 3184 unsigned int maxage; 3185 3186 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; 3187 l2_lookup_params = table->entries; 3188 3189 maxage = SJA1105_AGEING_TIME_MS(ageing_time); 3190 3191 if (l2_lookup_params->maxage == maxage) 3192 return 0; 3193 3194 l2_lookup_params->maxage = maxage; 3195 3196 return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME); 3197 } 3198 3199 static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu) 3200 { 3201 struct sja1105_l2_policing_entry *policing; 3202 struct sja1105_private *priv = ds->priv; 3203 3204 new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN; 3205 3206 if (dsa_is_cpu_port(ds, port)) 3207 new_mtu += VLAN_HLEN; 3208 3209 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; 3210 3211 if (policing[port].maxlen == new_mtu) 3212 return 0; 3213 3214 policing[port].maxlen = new_mtu; 3215 3216 return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING); 3217 } 3218 3219 static int sja1105_get_max_mtu(struct dsa_switch *ds, int port) 3220 { 3221 return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN; 3222 } 3223 3224 static int sja1105_port_setup_tc(struct dsa_switch *ds, int port, 3225 enum tc_setup_type type, 3226 void *type_data) 3227 { 3228 switch (type) { 3229 case TC_SETUP_QDISC_TAPRIO: 3230 return sja1105_setup_tc_taprio(ds, port, type_data); 3231 case TC_SETUP_QDISC_CBS: 3232 return sja1105_setup_tc_cbs(ds, port, type_data); 3233 default: 3234 return -EOPNOTSUPP; 3235 } 3236 } 3237 3238 /* We have a single mirror (@to) port, but can configure ingress and egress 3239 * mirroring on all other (@from) ports. 
 * We need to allow mirroring rules only as long as the @to port is always the
 * same, and we need to unset the @to port from mirr_port only when there is no
 * mirroring rule that references it.
 */
static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
				bool ingress, bool enabled)
{
	struct sja1105_general_params_entry *general_params;
	struct sja1105_mac_config_entry *mac;
	struct sja1105_table *table;
	bool already_enabled;
	u64 new_mirr_port;
	int rc;

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
	general_params = table->entries;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* mirr_port == SJA1105_NUM_PORTS is the "no mirror port" sentinel */
	already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS);
	if (already_enabled && enabled && general_params->mirr_port != to) {
		dev_err(priv->ds->dev,
			"Delete mirroring rules towards port %llu first\n",
			general_params->mirr_port);
		return -EBUSY;
	}

	new_mirr_port = to;
	if (!enabled) {
		bool keep = false;
		int port;

		/* Anybody still referencing mirr_port? */
		for (port = 0; port < SJA1105_NUM_PORTS; port++) {
			if (mac[port].ing_mirr || mac[port].egr_mirr) {
				keep = true;
				break;
			}
		}
		/* Unset already_enabled for next time */
		if (!keep)
			new_mirr_port = SJA1105_NUM_PORTS;
	}
	if (new_mirr_port != general_params->mirr_port) {
		general_params->mirr_port = new_mirr_port;

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
						  0, general_params, true);
		if (rc < 0)
			return rc;
	}

	/* Flag the @from port's MAC config as a mirroring source */
	if (ingress)
		mac[from].ing_mirr = enabled;
	else
		mac[from].egr_mirr = enabled;

	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
					    &mac[from], true);
}

static int sja1105_mirror_add(struct dsa_switch *ds, int port,
			      struct dsa_mall_mirror_tc_entry *mirror,
			      bool ingress)
{
	return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
				    ingress, true);
}

static void sja1105_mirror_del(struct dsa_switch *ds, int port,
			       struct dsa_mall_mirror_tc_entry *mirror)
{
	/* NOTE(review): errors from sja1105_mirror_apply() are dropped here;
	 * the .port_mirror_del DSA hook returns void so there is nowhere to
	 * propagate them.
	 */
	sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
			     mirror->ingress, false);
}

static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
				    struct dsa_mall_policer_tc_entry *policer)
{
	struct sja1105_l2_policing_entry *policing;
	struct sja1105_private *priv = ds->priv;

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	/* In hardware, every 8 microseconds the credit level is incremented by
	 * the value of RATE bytes divided by 64, up to a maximum of SMAX
	 * bytes.
3327 */ 3328 policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec, 3329 1000000); 3330 policing[port].smax = policer->burst; 3331 3332 return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING); 3333 } 3334 3335 static void sja1105_port_policer_del(struct dsa_switch *ds, int port) 3336 { 3337 struct sja1105_l2_policing_entry *policing; 3338 struct sja1105_private *priv = ds->priv; 3339 3340 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; 3341 3342 policing[port].rate = SJA1105_RATE_MBPS(1000); 3343 policing[port].smax = 65535; 3344 3345 sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING); 3346 } 3347 3348 static int sja1105_port_set_learning(struct sja1105_private *priv, int port, 3349 bool enabled) 3350 { 3351 struct sja1105_mac_config_entry *mac; 3352 int rc; 3353 3354 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 3355 3356 mac[port].dyn_learn = enabled; 3357 3358 rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, 3359 &mac[port], true); 3360 if (rc) 3361 return rc; 3362 3363 if (enabled) 3364 priv->learn_ena |= BIT(port); 3365 else 3366 priv->learn_ena &= ~BIT(port); 3367 3368 return 0; 3369 } 3370 3371 static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to, 3372 struct switchdev_brport_flags flags) 3373 { 3374 if (flags.mask & BR_FLOOD) { 3375 if (flags.val & BR_FLOOD) 3376 priv->ucast_egress_floods |= BIT(to); 3377 else 3378 priv->ucast_egress_floods &= ~BIT(to); 3379 } 3380 3381 if (flags.mask & BR_BCAST_FLOOD) { 3382 if (flags.val & BR_BCAST_FLOOD) 3383 priv->bcast_egress_floods |= BIT(to); 3384 else 3385 priv->bcast_egress_floods &= ~BIT(to); 3386 } 3387 3388 return sja1105_manage_flood_domains(priv); 3389 } 3390 3391 static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to, 3392 struct switchdev_brport_flags flags, 3393 struct netlink_ext_ack *extack) 3394 { 3395 struct sja1105_l2_lookup_entry *l2_lookup; 3396 struct sja1105_table 
*table;
	int match;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
	l2_lookup = table->entries;

	/* Locate the static entry matching all unknown multicast traffic */
	for (match = 0; match < table->entry_count; match++)
		if (l2_lookup[match].macaddr == SJA1105_UNKNOWN_MULTICAST &&
		    l2_lookup[match].mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
			break;

	if (match == table->entry_count) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Could not find FDB entry for unknown multicast");
		return -ENOSPC;
	}

	/* Add or remove @to from that entry's destination port mask */
	if (flags.val & BR_MCAST_FLOOD)
		l2_lookup[match].destports |= BIT(to);
	else
		l2_lookup[match].destports &= ~BIT(to);

	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					    l2_lookup[match].index,
					    &l2_lookup[match],
					    true);
}

/* Validate a requested set of bridge port flags before offloading them:
 * reject flags we never offload, and reject unicast/multicast flood settings
 * that diverge on chips which cannot control them independently.
 */
static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
					 struct switchdev_brport_flags flags,
					 struct netlink_ext_ack *extack)
{
	struct sja1105_private *priv = ds->priv;

	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
			   BR_BCAST_FLOOD))
		return -EINVAL;

	if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD) &&
	    !priv->info->can_limit_mcast_flood) {
		bool multicast = !!(flags.val & BR_MCAST_FLOOD);
		bool unicast = !!(flags.val & BR_FLOOD);

		if (unicast != multicast) {
			NL_SET_ERR_MSG_MOD(extack,
					   "This chip cannot configure multicast flooding independently of unicast");
			return -EINVAL;
		}
	}

	return 0;
}

/* Apply the (already validated) bridge port flags to the hardware */
static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
				     struct switchdev_brport_flags flags,
				     struct netlink_ext_ack *extack)
{
	struct sja1105_private *priv = ds->priv;
	int rc;

	if (flags.mask & BR_LEARNING) {
		bool learn_ena = !!(flags.val & BR_LEARNING);

		rc = sja1105_port_set_learning(priv, port, learn_ena);
		if (rc)
			return rc;
	}

	if (flags.mask & (BR_FLOOD | BR_BCAST_FLOOD)) {
		rc = sja1105_port_ucast_bcast_flood(priv, port, flags);
		if (rc)
			return rc;
	}

	/* For chips that can't offload BR_MCAST_FLOOD independently, there
	 * is nothing to do here, we ensured the configuration is in sync by
	 * offloading BR_FLOOD.
	 */
	if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) {
		rc = sja1105_port_mcast_flood(priv, port, flags,
					      extack);
		if (rc)
			return rc;
	}

	return 0;
}

/* Operations exposed to the DSA core */
static const struct dsa_switch_ops sja1105_switch_ops = {
	.get_tag_protocol	= sja1105_get_tag_protocol,
	.setup			= sja1105_setup,
	.teardown		= sja1105_teardown,
	.set_ageing_time	= sja1105_set_ageing_time,
	.port_change_mtu	= sja1105_change_mtu,
	.port_max_mtu		= sja1105_get_max_mtu,
	.phylink_validate	= sja1105_phylink_validate,
	.phylink_mac_link_state	= sja1105_mac_pcs_get_state,
	.phylink_mac_config	= sja1105_mac_config,
	.phylink_mac_link_up	= sja1105_mac_link_up,
	.phylink_mac_link_down	= sja1105_mac_link_down,
	.get_strings		= sja1105_get_strings,
	.get_ethtool_stats	= sja1105_get_ethtool_stats,
	.get_sset_count		= sja1105_get_sset_count,
	.get_ts_info		= sja1105_get_ts_info,
	.port_disable		= sja1105_port_disable,
	.port_fdb_dump		= sja1105_fdb_dump,
	.port_fdb_add		= sja1105_fdb_add,
	.port_fdb_del		= sja1105_fdb_del,
	.port_bridge_join	= sja1105_bridge_join,
	.port_bridge_leave	= sja1105_bridge_leave,
	.port_pre_bridge_flags	= sja1105_port_pre_bridge_flags,
	.port_bridge_flags	= sja1105_port_bridge_flags,
	.port_stp_state_set	= sja1105_bridge_stp_state_set,
	.port_vlan_filtering	= sja1105_vlan_filtering,
	.port_vlan_add		= sja1105_vlan_add,
	.port_vlan_del		= sja1105_vlan_del,
	.port_mdb_add		= sja1105_mdb_add,
	.port_mdb_del		= sja1105_mdb_del,
	.port_hwtstamp_get	= sja1105_hwtstamp_get,
	.port_hwtstamp_set	= sja1105_hwtstamp_set,
	.port_rxtstamp		= sja1105_port_rxtstamp,
	.port_txtstamp		= sja1105_port_txtstamp,
	.port_setup_tc		= sja1105_port_setup_tc,
	.port_mirror_add	= sja1105_mirror_add,
	.port_mirror_del	= sja1105_mirror_del,
	.port_policer_add	= sja1105_port_policer_add,
	.port_policer_del	= sja1105_port_policer_del,
	.cls_flower_add		= sja1105_cls_flower_add,
	.cls_flower_del		= sja1105_cls_flower_del,
	.cls_flower_stats	= sja1105_cls_flower_stats,
	.crosschip_bridge_join	= sja1105_crosschip_bridge_join,
	.crosschip_bridge_leave	= sja1105_crosschip_bridge_leave,
	.devlink_param_get	= sja1105_devlink_param_get,
	.devlink_param_set	= sja1105_devlink_param_set,
	.devlink_info_get	= sja1105_devlink_info_get,
};

/* Defined at the bottom of the file; needed early by
 * sja1105_check_device_id() below.
 */
static const struct of_device_id sja1105_dt_ids[];

/* Read the device ID and part number over SPI and cross-check them against
 * the match table, so a wrong compatible string in the device tree can be
 * detected (and corrected) at probe time.
 */
static int sja1105_check_device_id(struct sja1105_private *priv)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
	struct device *dev = &priv->spidev->dev;
	const struct of_device_id *match;
	u32 device_id;
	u64 part_no;
	int rc;

	rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
			      NULL);
	if (rc < 0)
		return rc;

	rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
			      SJA1105_SIZE_DEVICE_ID);
	if (rc < 0)
		return rc;

	sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);

	for (match = sja1105_dt_ids; match->compatible[0]; match++) {
		const struct sja1105_info *info = match->data;

		/* Is what's been probed in our match table at all? */
		if (info->device_id != device_id || info->part_no != part_no)
			continue;

		/* But is it what's in the device tree? */
		if (priv->info->device_id != device_id ||
		    priv->info->part_no != part_no) {
			dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n",
				 priv->info->name, info->name);
			/* It isn't.
No problem, pick that up. */
			priv->info = info;
		}

		return 0;
	}

	dev_err(dev, "Unexpected {device ID, part number}: 0x%x 0x%llx\n",
		device_id, part_no);

	return -ENODEV;
}

/* Bind to an SJA1105 switch: reset it, identify it over SPI, register it
 * with the DSA core and set up the per-port deferred xmit machinery.
 */
static int sja1105_probe(struct spi_device *spi)
{
	struct sja1105_tagger_data *tagger_data;
	struct device *dev = &spi->dev;
	struct sja1105_private *priv;
	struct dsa_switch *ds;
	int rc, port;

	if (!dev->of_node) {
		dev_err(dev, "No DTS bindings for SJA1105 driver\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Configure the optional reset pin and bring up switch */
	priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(priv->reset_gpio))
		/* NOTE(review): every gpiod error (including -EPROBE_DEFER)
		 * is treated as "no reset pin" here — confirm this is
		 * intentional.
		 */
		dev_dbg(dev, "reset-gpios not defined, ignoring\n");
	else
		sja1105_hw_reset(priv->reset_gpio, 1, 1);

	/* Populate our driver private structure (priv) based on
	 * the device tree node that was probed (spi)
	 */
	priv->spidev = spi;
	spi_set_drvdata(spi, priv);

	/* Configure the SPI bus */
	spi->bits_per_word = 8;
	rc = spi_setup(spi);
	if (rc < 0) {
		dev_err(dev, "Could not init SPI\n");
		return rc;
	}

	priv->info = of_device_get_match_data(dev);

	/* Detect hardware device */
	rc = sja1105_check_device_id(priv);
	if (rc < 0) {
		dev_err(dev, "Device ID check failed: %d\n", rc);
		return rc;
	}

	dev_info(dev, "Probed switch chip: %s\n", priv->info->name);

	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return -ENOMEM;

	ds->dev = dev;
	ds->num_ports = SJA1105_NUM_PORTS;
	ds->ops = &sja1105_switch_ops;
	ds->priv = priv;
	priv->ds = ds;

	tagger_data = &priv->tagger_data;

	mutex_init(&priv->ptp_data.lock);
	mutex_init(&priv->mgmt_lock);

	priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx),
					   GFP_KERNEL);
	if (!priv->dsa_8021q_ctx)
		return -ENOMEM;

	priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops;
	priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q);
	priv->dsa_8021q_ctx->ds = ds;

	INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);
	INIT_LIST_HEAD(&priv->bridge_vlans);
	INIT_LIST_HEAD(&priv->dsa_8021q_vlans);

	sja1105_tas_setup(ds);
	sja1105_flower_setup(ds);

	rc = dsa_register_switch(priv->ds);
	if (rc)
		return rc;

	if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
		priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
					 sizeof(struct sja1105_cbs_entry),
					 GFP_KERNEL);
		if (!priv->cbs) {
			rc = -ENOMEM;
			goto out_unregister_switch;
		}
	}

	/* Connections between dsa_port and sja1105_port */
	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		struct sja1105_port *sp = &priv->ports[port];
		struct dsa_port *dp = dsa_to_port(ds, port);
		struct net_device *slave;
		int subvlan;

		if (!dsa_is_user_port(ds, port))
			continue;

		dp->priv = sp;
		sp->dp = dp;
		sp->data = tagger_data;
		slave = dp->slave;
		kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
		sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
							slave->name);
		if (IS_ERR(sp->xmit_worker)) {
			rc = PTR_ERR(sp->xmit_worker);
			dev_err(ds->dev,
				"failed to create deferred xmit thread: %d\n",
				rc);
			goto out_destroy_workers;
		}
		skb_queue_head_init(&sp->xmit_queue);
		sp->xmit_tpid = ETH_P_SJA1105;

		/* No subvlan retagging configured yet */
		for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
			sp->subvlan_map[subvlan] = VLAN_N_VID;
	}

	return 0;

out_destroy_workers:
	/* Destroy only the workers created before the failing port */
	while (port-- > 0) {
		struct sja1105_port *sp = &priv->ports[port];

		if (!dsa_is_user_port(ds, port))
			continue;

		kthread_destroy_worker(sp->xmit_worker);
	}

out_unregister_switch:
	dsa_unregister_switch(ds);

	return rc;
}

/* Unbind from the switch; all memory was devm-allocated, so only the DSA
 * registration needs explicit teardown.
 */
static int sja1105_remove(struct spi_device *spi)
{
	struct sja1105_private *priv = spi_get_drvdata(spi);

	dsa_unregister_switch(priv->ds);
	return 0;
}

static const struct of_device_id sja1105_dt_ids[] = {
	{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
	{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
	{ .compatible = "nxp,sja1105p", .data = &sja1105p_info },
	{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
	{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
	{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);

static struct spi_driver sja1105_driver = {
	.driver = {
		.name  = "sja1105",
		/* NOTE(review): .owner is presumably also set by the SPI
		 * core during registration, making this redundant — confirm.
		 */
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(sja1105_dt_ids),
	},
	.probe  = sja1105_probe,
	.remove = sja1105_remove,
};

module_spi_driver(sja1105_driver);

MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
MODULE_DESCRIPTION("SJA1105 Driver");
MODULE_LICENSE("GPL v2");