1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH 3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> 4 */ 5 6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 8 #include <linux/delay.h> 9 #include <linux/module.h> 10 #include <linux/printk.h> 11 #include <linux/spi/spi.h> 12 #include <linux/errno.h> 13 #include <linux/gpio/consumer.h> 14 #include <linux/phylink.h> 15 #include <linux/of.h> 16 #include <linux/of_net.h> 17 #include <linux/of_mdio.h> 18 #include <linux/of_device.h> 19 #include <linux/netdev_features.h> 20 #include <linux/netdevice.h> 21 #include <linux/if_bridge.h> 22 #include <linux/if_ether.h> 23 #include <linux/dsa/8021q.h> 24 #include "sja1105.h" 25 26 static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len, 27 unsigned int startup_delay) 28 { 29 gpiod_set_value_cansleep(gpio, 1); 30 /* Wait for minimum reset pulse length */ 31 msleep(pulse_len); 32 gpiod_set_value_cansleep(gpio, 0); 33 /* Wait until chip is ready after reset */ 34 msleep(startup_delay); 35 } 36 37 static void 38 sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd, 39 int from, int to, bool allow) 40 { 41 if (allow) { 42 l2_fwd[from].bc_domain |= BIT(to); 43 l2_fwd[from].reach_port |= BIT(to); 44 l2_fwd[from].fl_domain |= BIT(to); 45 } else { 46 l2_fwd[from].bc_domain &= ~BIT(to); 47 l2_fwd[from].reach_port &= ~BIT(to); 48 l2_fwd[from].fl_domain &= ~BIT(to); 49 } 50 } 51 52 /* Structure used to temporarily transport device tree 53 * settings into sja1105_setup 54 */ 55 struct sja1105_dt_port { 56 phy_interface_t phy_mode; 57 sja1105_mii_role_t role; 58 }; 59 60 static int sja1105_init_mac_settings(struct sja1105_private *priv) 61 { 62 struct sja1105_mac_config_entry default_mac = { 63 /* Enable all 8 priority queues on egress. 64 * Every queue i holds top[i] - base[i] frames. 65 * Sum of top[i] - base[i] is 511 (max hardware limit). 
66 */ 67 .top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF}, 68 .base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0}, 69 .enabled = {true, true, true, true, true, true, true, true}, 70 /* Keep standard IFG of 12 bytes on egress. */ 71 .ifg = 0, 72 /* Always put the MAC speed in automatic mode, where it can be 73 * adjusted at runtime by PHYLINK. 74 */ 75 .speed = SJA1105_SPEED_AUTO, 76 /* No static correction for 1-step 1588 events */ 77 .tp_delin = 0, 78 .tp_delout = 0, 79 /* Disable aging for critical TTEthernet traffic */ 80 .maxage = 0xFF, 81 /* Internal VLAN (pvid) to apply to untagged ingress */ 82 .vlanprio = 0, 83 .vlanid = 1, 84 .ing_mirr = false, 85 .egr_mirr = false, 86 /* Don't drop traffic with other EtherType than ETH_P_IP */ 87 .drpnona664 = false, 88 /* Don't drop double-tagged traffic */ 89 .drpdtag = false, 90 /* Don't drop untagged traffic */ 91 .drpuntag = false, 92 /* Don't retag 802.1p (VID 0) traffic with the pvid */ 93 .retag = false, 94 /* Disable learning and I/O on user ports by default - 95 * STP will enable it. 96 */ 97 .dyn_learn = false, 98 .egress = false, 99 .ingress = false, 100 }; 101 struct sja1105_mac_config_entry *mac; 102 struct sja1105_table *table; 103 int i; 104 105 table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG]; 106 107 /* Discard previous MAC Configuration Table */ 108 if (table->entry_count) { 109 kfree(table->entries); 110 table->entry_count = 0; 111 } 112 113 table->entries = kcalloc(SJA1105_NUM_PORTS, 114 table->ops->unpacked_entry_size, GFP_KERNEL); 115 if (!table->entries) 116 return -ENOMEM; 117 118 table->entry_count = SJA1105_NUM_PORTS; 119 120 mac = table->entries; 121 122 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 123 mac[i] = default_mac; 124 if (i == dsa_upstream_port(priv->ds, i)) { 125 /* STP doesn't get called for CPU port, so we need to 126 * set the I/O parameters statically. 
127 */ 128 mac[i].dyn_learn = true; 129 mac[i].ingress = true; 130 mac[i].egress = true; 131 } 132 } 133 134 return 0; 135 } 136 137 static int sja1105_init_mii_settings(struct sja1105_private *priv, 138 struct sja1105_dt_port *ports) 139 { 140 struct device *dev = &priv->spidev->dev; 141 struct sja1105_xmii_params_entry *mii; 142 struct sja1105_table *table; 143 int i; 144 145 table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS]; 146 147 /* Discard previous xMII Mode Parameters Table */ 148 if (table->entry_count) { 149 kfree(table->entries); 150 table->entry_count = 0; 151 } 152 153 table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT, 154 table->ops->unpacked_entry_size, GFP_KERNEL); 155 if (!table->entries) 156 return -ENOMEM; 157 158 /* Override table based on PHYLINK DT bindings */ 159 table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT; 160 161 mii = table->entries; 162 163 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 164 switch (ports[i].phy_mode) { 165 case PHY_INTERFACE_MODE_MII: 166 mii->xmii_mode[i] = XMII_MODE_MII; 167 break; 168 case PHY_INTERFACE_MODE_RMII: 169 mii->xmii_mode[i] = XMII_MODE_RMII; 170 break; 171 case PHY_INTERFACE_MODE_RGMII: 172 case PHY_INTERFACE_MODE_RGMII_ID: 173 case PHY_INTERFACE_MODE_RGMII_RXID: 174 case PHY_INTERFACE_MODE_RGMII_TXID: 175 mii->xmii_mode[i] = XMII_MODE_RGMII; 176 break; 177 default: 178 dev_err(dev, "Unsupported PHY mode %s!\n", 179 phy_modes(ports[i].phy_mode)); 180 } 181 182 mii->phy_mac[i] = ports[i].role; 183 } 184 return 0; 185 } 186 187 static int sja1105_init_static_fdb(struct sja1105_private *priv) 188 { 189 struct sja1105_table *table; 190 191 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; 192 193 /* We only populate the FDB table through dynamic 194 * L2 Address Lookup entries 195 */ 196 if (table->entry_count) { 197 kfree(table->entries); 198 table->entry_count = 0; 199 } 200 return 0; 201 } 202 203 static int sja1105_init_l2_lookup_params(struct sja1105_private *priv) 204 { 205 struct 
sja1105_table *table; 206 u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS; 207 struct sja1105_l2_lookup_params_entry default_l2_lookup_params = { 208 /* Learned FDB entries are forgotten after 300 seconds */ 209 .maxage = SJA1105_AGEING_TIME_MS(300000), 210 /* All entries within a FDB bin are available for learning */ 211 .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE, 212 /* And the P/Q/R/S equivalent setting: */ 213 .start_dynspc = 0, 214 .maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries, 215 max_fdb_entries, max_fdb_entries, }, 216 /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */ 217 .poly = 0x97, 218 /* This selects between Independent VLAN Learning (IVL) and 219 * Shared VLAN Learning (SVL) 220 */ 221 .shared_learn = true, 222 /* Don't discard management traffic based on ENFPORT - 223 * we don't perform SMAC port enforcement anyway, so 224 * what we are setting here doesn't matter. 225 */ 226 .no_enf_hostprt = false, 227 /* Don't learn SMAC for mac_fltres1 and mac_fltres0. 228 * Maybe correlate with no_linklocal_learn from bridge driver? 
229 */ 230 .no_mgmt_learn = true, 231 /* P/Q/R/S only */ 232 .use_static = true, 233 /* Dynamically learned FDB entries can overwrite other (older) 234 * dynamic FDB entries 235 */ 236 .owr_dyn = true, 237 .drpnolearn = true, 238 }; 239 240 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; 241 242 if (table->entry_count) { 243 kfree(table->entries); 244 table->entry_count = 0; 245 } 246 247 table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT, 248 table->ops->unpacked_entry_size, GFP_KERNEL); 249 if (!table->entries) 250 return -ENOMEM; 251 252 table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT; 253 254 /* This table only has a single entry */ 255 ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] = 256 default_l2_lookup_params; 257 258 return 0; 259 } 260 261 static int sja1105_init_static_vlan(struct sja1105_private *priv) 262 { 263 struct sja1105_table *table; 264 struct sja1105_vlan_lookup_entry pvid = { 265 .ving_mirr = 0, 266 .vegr_mirr = 0, 267 .vmemb_port = 0, 268 .vlan_bc = 0, 269 .tag_port = 0, 270 .vlanid = 1, 271 }; 272 int i; 273 274 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; 275 276 /* The static VLAN table will only contain the initial pvid of 1. 277 * All other VLANs are to be configured through dynamic entries, 278 * and kept in the static configuration table as backing memory. 279 */ 280 if (table->entry_count) { 281 kfree(table->entries); 282 table->entry_count = 0; 283 } 284 285 table->entries = kcalloc(1, table->ops->unpacked_entry_size, 286 GFP_KERNEL); 287 if (!table->entries) 288 return -ENOMEM; 289 290 table->entry_count = 1; 291 292 /* VLAN 1: all DT-defined ports are members; no restrictions on 293 * forwarding; always transmit priority-tagged frames as untagged. 
294 */ 295 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 296 pvid.vmemb_port |= BIT(i); 297 pvid.vlan_bc |= BIT(i); 298 pvid.tag_port &= ~BIT(i); 299 } 300 301 ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid; 302 return 0; 303 } 304 305 static int sja1105_init_l2_forwarding(struct sja1105_private *priv) 306 { 307 struct sja1105_l2_forwarding_entry *l2fwd; 308 struct sja1105_table *table; 309 int i, j; 310 311 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING]; 312 313 if (table->entry_count) { 314 kfree(table->entries); 315 table->entry_count = 0; 316 } 317 318 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT, 319 table->ops->unpacked_entry_size, GFP_KERNEL); 320 if (!table->entries) 321 return -ENOMEM; 322 323 table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT; 324 325 l2fwd = table->entries; 326 327 /* First 5 entries define the forwarding rules */ 328 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 329 unsigned int upstream = dsa_upstream_port(priv->ds, i); 330 331 for (j = 0; j < SJA1105_NUM_TC; j++) 332 l2fwd[i].vlan_pmap[j] = j; 333 334 if (i == upstream) 335 continue; 336 337 sja1105_port_allow_traffic(l2fwd, i, upstream, true); 338 sja1105_port_allow_traffic(l2fwd, upstream, i, true); 339 } 340 /* Next 8 entries define VLAN PCP mapping from ingress to egress. 341 * Create a one-to-one mapping. 
342 */ 343 for (i = 0; i < SJA1105_NUM_TC; i++) 344 for (j = 0; j < SJA1105_NUM_PORTS; j++) 345 l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i; 346 347 return 0; 348 } 349 350 static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv) 351 { 352 struct sja1105_l2_forwarding_params_entry default_l2fwd_params = { 353 /* Disallow dynamic reconfiguration of vlan_pmap */ 354 .max_dynp = 0, 355 /* Use a single memory partition for all ingress queues */ 356 .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 }, 357 }; 358 struct sja1105_table *table; 359 360 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; 361 362 if (table->entry_count) { 363 kfree(table->entries); 364 table->entry_count = 0; 365 } 366 367 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT, 368 table->ops->unpacked_entry_size, GFP_KERNEL); 369 if (!table->entries) 370 return -ENOMEM; 371 372 table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT; 373 374 /* This table only has a single entry */ 375 ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] = 376 default_l2fwd_params; 377 378 return 0; 379 } 380 381 static int sja1105_init_general_params(struct sja1105_private *priv) 382 { 383 struct sja1105_general_params_entry default_general_params = { 384 /* Disallow dynamic changing of the mirror port */ 385 .mirr_ptacu = 0, 386 .switchid = priv->ds->index, 387 /* Priority queue for link-local frames trapped to CPU */ 388 .hostprio = 7, 389 .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A, 390 .mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK, 391 .incl_srcpt1 = false, 392 .send_meta1 = false, 393 .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B, 394 .mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK, 395 .incl_srcpt0 = false, 396 .send_meta0 = false, 397 /* The destination for traffic matching mac_fltres1 and 398 * mac_fltres0 on all ports except host_port. 
Such traffic 399 * receieved on host_port itself would be dropped, except 400 * by installing a temporary 'management route' 401 */ 402 .host_port = dsa_upstream_port(priv->ds, 0), 403 /* Same as host port */ 404 .mirr_port = dsa_upstream_port(priv->ds, 0), 405 /* Link-local traffic received on casc_port will be forwarded 406 * to host_port without embedding the source port and device ID 407 * info in the destination MAC address (presumably because it 408 * is a cascaded port and a downstream SJA switch already did 409 * that). Default to an invalid port (to disable the feature) 410 * and overwrite this if we find any DSA (cascaded) ports. 411 */ 412 .casc_port = SJA1105_NUM_PORTS, 413 /* No TTEthernet */ 414 .vllupformat = 0, 415 .vlmarker = 0, 416 .vlmask = 0, 417 /* Only update correctionField for 1-step PTP (L2 transport) */ 418 .ignore2stf = 0, 419 /* Forcefully disable VLAN filtering by telling 420 * the switch that VLAN has a different EtherType. 421 */ 422 .tpid = ETH_P_SJA1105, 423 .tpid2 = ETH_P_SJA1105, 424 }; 425 struct sja1105_table *table; 426 int i, k = 0; 427 428 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 429 if (dsa_is_dsa_port(priv->ds, i)) 430 default_general_params.casc_port = i; 431 else if (dsa_is_user_port(priv->ds, i)) 432 priv->ports[i].mgmt_slot = k++; 433 } 434 435 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; 436 437 if (table->entry_count) { 438 kfree(table->entries); 439 table->entry_count = 0; 440 } 441 442 table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT, 443 table->ops->unpacked_entry_size, GFP_KERNEL); 444 if (!table->entries) 445 return -ENOMEM; 446 447 table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT; 448 449 /* This table only has a single entry */ 450 ((struct sja1105_general_params_entry *)table->entries)[0] = 451 default_general_params; 452 453 return 0; 454 } 455 456 #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000) 457 458 static inline void 459 sja1105_setup_policer(struct 
sja1105_l2_policing_entry *policing, 460 int index) 461 { 462 policing[index].sharindx = index; 463 policing[index].smax = 65535; /* Burst size in bytes */ 464 policing[index].rate = SJA1105_RATE_MBPS(1000); 465 policing[index].maxlen = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; 466 policing[index].partition = 0; 467 } 468 469 static int sja1105_init_l2_policing(struct sja1105_private *priv) 470 { 471 struct sja1105_l2_policing_entry *policing; 472 struct sja1105_table *table; 473 int i, j, k; 474 475 table = &priv->static_config.tables[BLK_IDX_L2_POLICING]; 476 477 /* Discard previous L2 Policing Table */ 478 if (table->entry_count) { 479 kfree(table->entries); 480 table->entry_count = 0; 481 } 482 483 table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT, 484 table->ops->unpacked_entry_size, GFP_KERNEL); 485 if (!table->entries) 486 return -ENOMEM; 487 488 table->entry_count = SJA1105_MAX_L2_POLICING_COUNT; 489 490 policing = table->entries; 491 492 /* k sweeps through all unicast policers (0-39). 493 * bcast sweeps through policers 40-44. 
494 */ 495 for (i = 0, k = 0; i < SJA1105_NUM_PORTS; i++) { 496 int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + i; 497 498 for (j = 0; j < SJA1105_NUM_TC; j++, k++) 499 sja1105_setup_policer(policing, k); 500 501 /* Set up this port's policer for broadcast traffic */ 502 sja1105_setup_policer(policing, bcast); 503 } 504 return 0; 505 } 506 507 static int sja1105_init_avb_params(struct sja1105_private *priv, 508 bool on) 509 { 510 struct sja1105_avb_params_entry *avb; 511 struct sja1105_table *table; 512 513 table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS]; 514 515 /* Discard previous AVB Parameters Table */ 516 if (table->entry_count) { 517 kfree(table->entries); 518 table->entry_count = 0; 519 } 520 521 /* Configure the reception of meta frames only if requested */ 522 if (!on) 523 return 0; 524 525 table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT, 526 table->ops->unpacked_entry_size, GFP_KERNEL); 527 if (!table->entries) 528 return -ENOMEM; 529 530 table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT; 531 532 avb = table->entries; 533 534 avb->destmeta = SJA1105_META_DMAC; 535 avb->srcmeta = SJA1105_META_SMAC; 536 537 return 0; 538 } 539 540 static int sja1105_static_config_load(struct sja1105_private *priv, 541 struct sja1105_dt_port *ports) 542 { 543 int rc; 544 545 sja1105_static_config_free(&priv->static_config); 546 rc = sja1105_static_config_init(&priv->static_config, 547 priv->info->static_ops, 548 priv->info->device_id); 549 if (rc) 550 return rc; 551 552 /* Build static configuration */ 553 rc = sja1105_init_mac_settings(priv); 554 if (rc < 0) 555 return rc; 556 rc = sja1105_init_mii_settings(priv, ports); 557 if (rc < 0) 558 return rc; 559 rc = sja1105_init_static_fdb(priv); 560 if (rc < 0) 561 return rc; 562 rc = sja1105_init_static_vlan(priv); 563 if (rc < 0) 564 return rc; 565 rc = sja1105_init_l2_lookup_params(priv); 566 if (rc < 0) 567 return rc; 568 rc = sja1105_init_l2_forwarding(priv); 569 if (rc < 0) 570 return rc; 571 rc = 
sja1105_init_l2_forwarding_params(priv); 572 if (rc < 0) 573 return rc; 574 rc = sja1105_init_l2_policing(priv); 575 if (rc < 0) 576 return rc; 577 rc = sja1105_init_general_params(priv); 578 if (rc < 0) 579 return rc; 580 rc = sja1105_init_avb_params(priv, false); 581 if (rc < 0) 582 return rc; 583 584 /* Send initial configuration to hardware via SPI */ 585 return sja1105_static_config_upload(priv); 586 } 587 588 static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, 589 const struct sja1105_dt_port *ports) 590 { 591 int i; 592 593 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 594 if (ports->role == XMII_MAC) 595 continue; 596 597 if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_RXID || 598 ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID) 599 priv->rgmii_rx_delay[i] = true; 600 601 if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_TXID || 602 ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID) 603 priv->rgmii_tx_delay[i] = true; 604 605 if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) && 606 !priv->info->setup_rgmii_delay) 607 return -EINVAL; 608 } 609 return 0; 610 } 611 612 static int sja1105_parse_ports_node(struct sja1105_private *priv, 613 struct sja1105_dt_port *ports, 614 struct device_node *ports_node) 615 { 616 struct device *dev = &priv->spidev->dev; 617 struct device_node *child; 618 619 for_each_child_of_node(ports_node, child) { 620 struct device_node *phy_node; 621 int phy_mode; 622 u32 index; 623 624 /* Get switch port number from DT */ 625 if (of_property_read_u32(child, "reg", &index) < 0) { 626 dev_err(dev, "Port number not defined in device tree " 627 "(property \"reg\")\n"); 628 of_node_put(child); 629 return -ENODEV; 630 } 631 632 /* Get PHY mode from DT */ 633 phy_mode = of_get_phy_mode(child); 634 if (phy_mode < 0) { 635 dev_err(dev, "Failed to read phy-mode or " 636 "phy-interface-type property for port %d\n", 637 index); 638 of_node_put(child); 639 return -ENODEV; 640 } 641 ports[index].phy_mode = phy_mode; 642 643 phy_node = 
of_parse_phandle(child, "phy-handle", 0); 644 if (!phy_node) { 645 if (!of_phy_is_fixed_link(child)) { 646 dev_err(dev, "phy-handle or fixed-link " 647 "properties missing!\n"); 648 of_node_put(child); 649 return -ENODEV; 650 } 651 /* phy-handle is missing, but fixed-link isn't. 652 * So it's a fixed link. Default to PHY role. 653 */ 654 ports[index].role = XMII_PHY; 655 } else { 656 /* phy-handle present => put port in MAC role */ 657 ports[index].role = XMII_MAC; 658 of_node_put(phy_node); 659 } 660 661 /* The MAC/PHY role can be overridden with explicit bindings */ 662 if (of_property_read_bool(child, "sja1105,role-mac")) 663 ports[index].role = XMII_MAC; 664 else if (of_property_read_bool(child, "sja1105,role-phy")) 665 ports[index].role = XMII_PHY; 666 } 667 668 return 0; 669 } 670 671 static int sja1105_parse_dt(struct sja1105_private *priv, 672 struct sja1105_dt_port *ports) 673 { 674 struct device *dev = &priv->spidev->dev; 675 struct device_node *switch_node = dev->of_node; 676 struct device_node *ports_node; 677 int rc; 678 679 ports_node = of_get_child_by_name(switch_node, "ports"); 680 if (!ports_node) { 681 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n"); 682 return -ENODEV; 683 } 684 685 rc = sja1105_parse_ports_node(priv, ports, ports_node); 686 of_node_put(ports_node); 687 688 return rc; 689 } 690 691 /* Convert link speed from SJA1105 to ethtool encoding */ 692 static int sja1105_speed[] = { 693 [SJA1105_SPEED_AUTO] = SPEED_UNKNOWN, 694 [SJA1105_SPEED_10MBPS] = SPEED_10, 695 [SJA1105_SPEED_100MBPS] = SPEED_100, 696 [SJA1105_SPEED_1000MBPS] = SPEED_1000, 697 }; 698 699 /* Set link speed in the MAC configuration for a specific port. 
 */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
				      int speed_mbps)
{
	struct sja1105_xmii_params_entry *mii;
	struct sja1105_mac_config_entry *mac;
	struct device *dev = priv->ds->dev;
	sja1105_phy_interface_t phy_mode;
	sja1105_speed_t speed;
	int rc;

	/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
	 * tables. On E/T, MAC reconfig tables are not readable, only writable.
	 * We have to *know* what the MAC looks like. For the sake of keeping
	 * the code common, we'll use the static configuration tables as a
	 * reasonable approximation for both E/T and P/Q/R/S.
	 */
	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	/* Translate the ethtool speed (SPEED_*) into the hardware encoding
	 * (SJA1105_SPEED_*); reject anything the MAC cannot do.
	 */
	switch (speed_mbps) {
	case SPEED_UNKNOWN:
		/* PHYLINK called sja1105_mac_config() to inform us about
		 * the state->interface, but AN has not completed and the
		 * speed is not yet valid. UM10944.pdf says that setting
		 * SJA1105_SPEED_AUTO at runtime disables the port, so that is
		 * ok for power consumption in case AN will never complete -
		 * otherwise PHYLINK should come back with a new update.
		 */
		speed = SJA1105_SPEED_AUTO;
		break;
	case SPEED_10:
		speed = SJA1105_SPEED_10MBPS;
		break;
	case SPEED_100:
		speed = SJA1105_SPEED_100MBPS;
		break;
	case SPEED_1000:
		speed = SJA1105_SPEED_1000MBPS;
		break;
	default:
		dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
		return -EINVAL;
	}

	/* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
	 * table, since this will be used for the clocking setup, and we no
	 * longer need to store it in the static config (already told hardware
	 * we want auto during upload phase).
	 */
	mac[port].speed = speed;

	/* Write to the dynamic reconfiguration tables */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					  &mac[port], true);
	if (rc < 0) {
		dev_err(dev, "Failed to write MAC config: %d\n", rc);
		return rc;
	}

	/* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
	 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
	 * RMII no change of the clock setup is required. Actually, changing
	 * the clock setup does interrupt the clock signal for a certain time
	 * which causes trouble for all PHYs relying on this signal.
	 */
	phy_mode = mii->xmii_mode[port];
	if (phy_mode != XMII_MODE_RGMII)
		return 0;

	return sja1105_clocking_setup_port(priv, port);
}

/* The SJA1105 MAC programming model is through the static config (the xMII
 * Mode table cannot be dynamically reconfigured), and we have to program
 * that early (earlier than PHYLINK calls us, anyway).
 * So just error out in case the connected PHY attempts to change the initial
 * system interface MII protocol from what is defined in the DT, at least for
 * now.
778 */ 779 static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port, 780 phy_interface_t interface) 781 { 782 struct sja1105_xmii_params_entry *mii; 783 sja1105_phy_interface_t phy_mode; 784 785 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; 786 phy_mode = mii->xmii_mode[port]; 787 788 switch (interface) { 789 case PHY_INTERFACE_MODE_MII: 790 return (phy_mode != XMII_MODE_MII); 791 case PHY_INTERFACE_MODE_RMII: 792 return (phy_mode != XMII_MODE_RMII); 793 case PHY_INTERFACE_MODE_RGMII: 794 case PHY_INTERFACE_MODE_RGMII_ID: 795 case PHY_INTERFACE_MODE_RGMII_RXID: 796 case PHY_INTERFACE_MODE_RGMII_TXID: 797 return (phy_mode != XMII_MODE_RGMII); 798 default: 799 return true; 800 } 801 } 802 803 static void sja1105_mac_config(struct dsa_switch *ds, int port, 804 unsigned int link_an_mode, 805 const struct phylink_link_state *state) 806 { 807 struct sja1105_private *priv = ds->priv; 808 809 if (sja1105_phy_mode_mismatch(priv, port, state->interface)) 810 return; 811 812 if (link_an_mode == MLO_AN_INBAND) { 813 dev_err(ds->dev, "In-band AN not supported!\n"); 814 return; 815 } 816 817 sja1105_adjust_port_config(priv, port, state->speed); 818 } 819 820 static void sja1105_mac_link_down(struct dsa_switch *ds, int port, 821 unsigned int mode, 822 phy_interface_t interface) 823 { 824 sja1105_inhibit_tx(ds->priv, BIT(port), true); 825 } 826 827 static void sja1105_mac_link_up(struct dsa_switch *ds, int port, 828 unsigned int mode, 829 phy_interface_t interface, 830 struct phy_device *phydev) 831 { 832 sja1105_inhibit_tx(ds->priv, BIT(port), false); 833 } 834 835 static void sja1105_phylink_validate(struct dsa_switch *ds, int port, 836 unsigned long *supported, 837 struct phylink_link_state *state) 838 { 839 /* Construct a new mask which exhaustively contains all link features 840 * supported by the MAC, and then apply that (logical AND) to what will 841 * be sent to the PHY for "marketing". 
842 */ 843 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 844 struct sja1105_private *priv = ds->priv; 845 struct sja1105_xmii_params_entry *mii; 846 847 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; 848 849 /* include/linux/phylink.h says: 850 * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink 851 * expects the MAC driver to return all supported link modes. 852 */ 853 if (state->interface != PHY_INTERFACE_MODE_NA && 854 sja1105_phy_mode_mismatch(priv, port, state->interface)) { 855 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 856 return; 857 } 858 859 /* The MAC does not support pause frames, and also doesn't 860 * support half-duplex traffic modes. 861 */ 862 phylink_set(mask, Autoneg); 863 phylink_set(mask, MII); 864 phylink_set(mask, 10baseT_Full); 865 phylink_set(mask, 100baseT_Full); 866 if (mii->xmii_mode[port] == XMII_MODE_RGMII) 867 phylink_set(mask, 1000baseT_Full); 868 869 bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); 870 bitmap_and(state->advertising, state->advertising, mask, 871 __ETHTOOL_LINK_MODE_MASK_NBITS); 872 } 873 874 static int 875 sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port, 876 const struct sja1105_l2_lookup_entry *requested) 877 { 878 struct sja1105_l2_lookup_entry *l2_lookup; 879 struct sja1105_table *table; 880 int i; 881 882 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; 883 l2_lookup = table->entries; 884 885 for (i = 0; i < table->entry_count; i++) 886 if (l2_lookup[i].macaddr == requested->macaddr && 887 l2_lookup[i].vlanid == requested->vlanid && 888 l2_lookup[i].destports & BIT(port)) 889 return i; 890 891 return -1; 892 } 893 894 /* We want FDB entries added statically through the bridge command to persist 895 * across switch resets, which are a common thing during normal SJA1105 896 * operation. So we have to back them up in the static configuration tables 897 * and hence apply them on next static config upload... yay! 
898 */ 899 static int 900 sja1105_static_fdb_change(struct sja1105_private *priv, int port, 901 const struct sja1105_l2_lookup_entry *requested, 902 bool keep) 903 { 904 struct sja1105_l2_lookup_entry *l2_lookup; 905 struct sja1105_table *table; 906 int rc, match; 907 908 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; 909 910 match = sja1105_find_static_fdb_entry(priv, port, requested); 911 if (match < 0) { 912 /* Can't delete a missing entry. */ 913 if (!keep) 914 return 0; 915 916 /* No match => new entry */ 917 rc = sja1105_table_resize(table, table->entry_count + 1); 918 if (rc) 919 return rc; 920 921 match = table->entry_count - 1; 922 } 923 924 /* Assign pointer after the resize (it may be new memory) */ 925 l2_lookup = table->entries; 926 927 /* We have a match. 928 * If the job was to add this FDB entry, it's already done (mostly 929 * anyway, since the port forwarding mask may have changed, case in 930 * which we update it). 931 * Otherwise we have to delete it. 932 */ 933 if (keep) { 934 l2_lookup[match] = *requested; 935 return 0; 936 } 937 938 /* To remove, the strategy is to overwrite the element with 939 * the last one, and then reduce the array size by 1 940 */ 941 l2_lookup[match] = l2_lookup[table->entry_count - 1]; 942 return sja1105_table_resize(table, table->entry_count - 1); 943 } 944 945 /* First-generation switches have a 4-way set associative TCAM that 946 * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of 947 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin). 948 * For the placement of a newly learnt FDB entry, the switch selects the bin 949 * based on a hash function, and the way within that bin incrementally. 
950 */ 951 static inline int sja1105et_fdb_index(int bin, int way) 952 { 953 return bin * SJA1105ET_FDB_BIN_SIZE + way; 954 } 955 956 static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin, 957 const u8 *addr, u16 vid, 958 struct sja1105_l2_lookup_entry *match, 959 int *last_unused) 960 { 961 int way; 962 963 for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) { 964 struct sja1105_l2_lookup_entry l2_lookup = {0}; 965 int index = sja1105et_fdb_index(bin, way); 966 967 /* Skip unused entries, optionally marking them 968 * into the return value 969 */ 970 if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 971 index, &l2_lookup)) { 972 if (last_unused) 973 *last_unused = way; 974 continue; 975 } 976 977 if (l2_lookup.macaddr == ether_addr_to_u64(addr) && 978 l2_lookup.vlanid == vid) { 979 if (match) 980 *match = l2_lookup; 981 return way; 982 } 983 } 984 /* Return an invalid entry index if not found */ 985 return -1; 986 } 987 988 int sja1105et_fdb_add(struct dsa_switch *ds, int port, 989 const unsigned char *addr, u16 vid) 990 { 991 struct sja1105_l2_lookup_entry l2_lookup = {0}; 992 struct sja1105_private *priv = ds->priv; 993 struct device *dev = ds->dev; 994 int last_unused = -1; 995 int bin, way, rc; 996 997 bin = sja1105et_fdb_hash(priv, addr, vid); 998 999 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid, 1000 &l2_lookup, &last_unused); 1001 if (way >= 0) { 1002 /* We have an FDB entry. Is our port in the destination 1003 * mask? If yes, we need to do nothing. If not, we need 1004 * to rewrite the entry by adding this port to it. 1005 */ 1006 if (l2_lookup.destports & BIT(port)) 1007 return 0; 1008 l2_lookup.destports |= BIT(port); 1009 } else { 1010 int index = sja1105et_fdb_index(bin, way); 1011 1012 /* We don't have an FDB entry. We construct a new one and 1013 * try to find a place for it within the FDB table. 
1014 */ 1015 l2_lookup.macaddr = ether_addr_to_u64(addr); 1016 l2_lookup.destports = BIT(port); 1017 l2_lookup.vlanid = vid; 1018 1019 if (last_unused >= 0) { 1020 way = last_unused; 1021 } else { 1022 /* Bin is full, need to evict somebody. 1023 * Choose victim at random. If you get these messages 1024 * often, you may need to consider changing the 1025 * distribution function: 1026 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly 1027 */ 1028 get_random_bytes(&way, sizeof(u8)); 1029 way %= SJA1105ET_FDB_BIN_SIZE; 1030 dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n", 1031 bin, addr, way); 1032 /* Evict entry */ 1033 sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1034 index, NULL, false); 1035 } 1036 } 1037 l2_lookup.index = sja1105et_fdb_index(bin, way); 1038 1039 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1040 l2_lookup.index, &l2_lookup, 1041 true); 1042 if (rc < 0) 1043 return rc; 1044 1045 return sja1105_static_fdb_change(priv, port, &l2_lookup, true); 1046 } 1047 1048 int sja1105et_fdb_del(struct dsa_switch *ds, int port, 1049 const unsigned char *addr, u16 vid) 1050 { 1051 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1052 struct sja1105_private *priv = ds->priv; 1053 int index, bin, way, rc; 1054 bool keep; 1055 1056 bin = sja1105et_fdb_hash(priv, addr, vid); 1057 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid, 1058 &l2_lookup, NULL); 1059 if (way < 0) 1060 return 0; 1061 index = sja1105et_fdb_index(bin, way); 1062 1063 /* We have an FDB entry. Is our port in the destination mask? If yes, 1064 * we need to remove it. If the resulting port mask becomes empty, we 1065 * need to completely evict the FDB entry. 1066 * Otherwise we just write it back. 
 */
        l2_lookup.destports &= ~BIT(port);

        /* Keep the entry only if at least one destination port remains */
        if (l2_lookup.destports)
                keep = true;
        else
                keep = false;

        rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
                                          index, &l2_lookup, keep);
        if (rc < 0)
                return rc;

        return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
}

int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
                        const unsigned char *addr, u16 vid)
{
        struct sja1105_l2_lookup_entry l2_lookup = {0};
        struct sja1105_private *priv = ds->priv;
        int rc, i;

        /* Search for an existing entry in the FDB table */
        l2_lookup.macaddr = ether_addr_to_u64(addr);
        l2_lookup.vlanid = vid;
        l2_lookup.iotag = SJA1105_S_TAG;
        l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
        /* Only match on the VID/IOTAG when the port is under a
         * vlan_filtering bridge; otherwise match on DMAC alone.
         */
        if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
                l2_lookup.mask_vlanid = VLAN_VID_MASK;
                l2_lookup.mask_iotag = BIT(0);
        } else {
                l2_lookup.mask_vlanid = 0;
                l2_lookup.mask_iotag = 0;
        }
        l2_lookup.destports = BIT(port);

        rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
                                         SJA1105_SEARCH, &l2_lookup);
        if (rc == 0) {
                /* Found and this port is already in the entry's
                 * port mask => job done
                 */
                if (l2_lookup.destports & BIT(port))
                        return 0;
                /* l2_lookup.index is populated by the switch in case it
                 * found something.
                 */
                l2_lookup.destports |= BIT(port);
                goto skip_finding_an_index;
        }

        /* Not found, so try to find an unused spot in the FDB.
         * This is slightly inefficient because the strategy is knock-knock at
         * every possible position from 0 to 1023.
         */
        for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
                rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
                                                 i, NULL);
                if (rc < 0)
                        break;
        }
        if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
                dev_err(ds->dev, "FDB is full, cannot add entry.\n");
                return -EINVAL;
        }
        l2_lookup.lockeds = true;
        l2_lookup.index = i;

skip_finding_an_index:
        rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
                                          l2_lookup.index, &l2_lookup,
                                          true);
        if (rc < 0)
                return rc;

        return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
}

int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
                        const unsigned char *addr, u16 vid)
{
        struct sja1105_l2_lookup_entry l2_lookup = {0};
        struct sja1105_private *priv = ds->priv;
        bool keep;
        int rc;

        l2_lookup.macaddr = ether_addr_to_u64(addr);
        l2_lookup.vlanid = vid;
        l2_lookup.iotag = SJA1105_S_TAG;
        l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
        if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
                l2_lookup.mask_vlanid = VLAN_VID_MASK;
                l2_lookup.mask_iotag = BIT(0);
        } else {
                l2_lookup.mask_vlanid = 0;
                l2_lookup.mask_iotag = 0;
        }
        l2_lookup.destports = BIT(port);

        rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
                                         SJA1105_SEARCH, &l2_lookup);
        if (rc < 0)
                return 0;

        l2_lookup.destports &= ~BIT(port);

        /* Decide whether we remove just this port from the FDB entry,
         * or if we remove it completely.
 */
        if (l2_lookup.destports)
                keep = true;
        else
                keep = false;

        rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
                                          l2_lookup.index, &l2_lookup, keep);
        if (rc < 0)
                return rc;

        return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
}

/* Common entry point: normalizes the VID and dispatches to the
 * device-specific (E/T vs P/Q/R/S) FDB add implementation.
 */
static int sja1105_fdb_add(struct dsa_switch *ds, int port,
                           const unsigned char *addr, u16 vid)
{
        struct sja1105_private *priv = ds->priv;

        /* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
         * so the switch still does some VLAN processing internally.
         * But Shared VLAN Learning (SVL) is also active, and it will take
         * care of autonomous forwarding between the unique pvid's of each
         * port.  Here we just make sure that users can't add duplicate FDB
         * entries when in this mode - the actual VID doesn't matter except
         * for what gets printed in 'bridge fdb show'.  In the case of zero,
         * no VID gets printed at all.
         */
        if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
                vid = 0;

        return priv->info->fdb_add_cmd(ds, port, addr, vid);
}

static int sja1105_fdb_del(struct dsa_switch *ds, int port,
                           const unsigned char *addr, u16 vid)
{
        struct sja1105_private *priv = ds->priv;

        /* Same VID normalization as in sja1105_fdb_add */
        if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
                vid = 0;

        return priv->info->fdb_del_cmd(ds, port, addr, vid);
}

static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
                            dsa_fdb_dump_cb_t *cb, void *data)
{
        struct sja1105_private *priv = ds->priv;
        struct device *dev = ds->dev;
        int i;

        for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
                struct sja1105_l2_lookup_entry l2_lookup = {0};
                u8 macaddr[ETH_ALEN];
                int rc;

                rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
                                                 i, &l2_lookup);
                /* No fdb entry at i, not an issue */
                if (rc == -ENOENT)
                        continue;
                if (rc) {
                        dev_err(dev, "Failed to dump FDB: %d\n", rc);
                        return rc;
                }

                /* FDB dump callback is per port. This means we have to
                 * disregard a valid entry if it's not for this port, even if
                 * only to revisit it later. This is inefficient because the
                 * 1024-sized FDB table needs to be traversed 4 times through
                 * SPI during a 'bridge fdb show' command.
                 */
                if (!(l2_lookup.destports & BIT(port)))
                        continue;
                u64_to_ether_addr(l2_lookup.macaddr, macaddr);

                /* We need to hide the dsa_8021q VLANs from the user.
*/ 1254 if (!dsa_port_is_vlan_filtering(&ds->ports[port])) 1255 l2_lookup.vlanid = 0; 1256 cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); 1257 } 1258 return 0; 1259 } 1260 1261 /* This callback needs to be present */ 1262 static int sja1105_mdb_prepare(struct dsa_switch *ds, int port, 1263 const struct switchdev_obj_port_mdb *mdb) 1264 { 1265 return 0; 1266 } 1267 1268 static void sja1105_mdb_add(struct dsa_switch *ds, int port, 1269 const struct switchdev_obj_port_mdb *mdb) 1270 { 1271 sja1105_fdb_add(ds, port, mdb->addr, mdb->vid); 1272 } 1273 1274 static int sja1105_mdb_del(struct dsa_switch *ds, int port, 1275 const struct switchdev_obj_port_mdb *mdb) 1276 { 1277 return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid); 1278 } 1279 1280 static int sja1105_bridge_member(struct dsa_switch *ds, int port, 1281 struct net_device *br, bool member) 1282 { 1283 struct sja1105_l2_forwarding_entry *l2_fwd; 1284 struct sja1105_private *priv = ds->priv; 1285 int i, rc; 1286 1287 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries; 1288 1289 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 1290 /* Add this port to the forwarding matrix of the 1291 * other ports in the same bridge, and viceversa. 1292 */ 1293 if (!dsa_is_user_port(ds, i)) 1294 continue; 1295 /* For the ports already under the bridge, only one thing needs 1296 * to be done, and that is to add this port to their 1297 * reachability domain. So we can perform the SPI write for 1298 * them immediately. However, for this port itself (the one 1299 * that is new to the bridge), we need to add all other ports 1300 * to its reachability domain. So we do that incrementally in 1301 * this loop, and perform the SPI write only at the end, once 1302 * the domain contains all other bridge ports. 
1303 */ 1304 if (i == port) 1305 continue; 1306 if (dsa_to_port(ds, i)->bridge_dev != br) 1307 continue; 1308 sja1105_port_allow_traffic(l2_fwd, i, port, member); 1309 sja1105_port_allow_traffic(l2_fwd, port, i, member); 1310 1311 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING, 1312 i, &l2_fwd[i], true); 1313 if (rc < 0) 1314 return rc; 1315 } 1316 1317 return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING, 1318 port, &l2_fwd[port], true); 1319 } 1320 1321 static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, 1322 u8 state) 1323 { 1324 struct sja1105_private *priv = ds->priv; 1325 struct sja1105_mac_config_entry *mac; 1326 1327 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 1328 1329 switch (state) { 1330 case BR_STATE_DISABLED: 1331 case BR_STATE_BLOCKING: 1332 /* From UM10944 description of DRPDTAG (why put this there?): 1333 * "Management traffic flows to the port regardless of the state 1334 * of the INGRESS flag". So BPDUs are still be allowed to pass. 1335 * At the moment no difference between DISABLED and BLOCKING. 
 */
                mac[port].ingress = false;
                mac[port].egress = false;
                mac[port].dyn_learn = false;
                break;
        case BR_STATE_LISTENING:
                mac[port].ingress = true;
                mac[port].egress = false;
                mac[port].dyn_learn = false;
                break;
        case BR_STATE_LEARNING:
                mac[port].ingress = true;
                mac[port].egress = false;
                mac[port].dyn_learn = true;
                break;
        case BR_STATE_FORWARDING:
                mac[port].ingress = true;
                mac[port].egress = true;
                mac[port].dyn_learn = true;
                break;
        default:
                dev_err(ds->dev, "invalid STP state: %d\n", state);
                return;
        }

        sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
                                     &mac[port], true);
}

static int sja1105_bridge_join(struct dsa_switch *ds, int port,
                               struct net_device *br)
{
        return sja1105_bridge_member(ds, port, br, true);
}

static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
                                 struct net_device *br)
{
        sja1105_bridge_member(ds, port, br, false);
}

/* For situations where we need to change a setting at runtime that is only
 * available through the static configuration, resetting the switch in order
 * to upload the new static config is unavoidable. Back up the settings we
 * modify at runtime (currently only MAC) and restore them after uploading,
 * such that this operation is relatively seamless.
 */
static int sja1105_static_config_reload(struct sja1105_private *priv)
{
        struct sja1105_mac_config_entry *mac;
        int speed_mbps[SJA1105_NUM_PORTS];
        int rc, i;

        mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

        /* Back up the dynamic link speed changed by sja1105_adjust_port_config
         * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
         * switch wants to see in the static config in order to allow us to
         * change it through the dynamic interface later.
         */
        for (i = 0; i < SJA1105_NUM_PORTS; i++) {
                speed_mbps[i] = sja1105_speed[mac[i].speed];
                mac[i].speed = SJA1105_SPEED_AUTO;
        }

        /* Reset switch and send updated static configuration */
        rc = sja1105_static_config_upload(priv);
        if (rc < 0)
                goto out;

        /* Configure the CGU (PLLs) for MII and RMII PHYs.
         * For these interfaces there is no dynamic configuration
         * needed, since PLLs have same settings at all speeds.
         */
        rc = sja1105_clocking_setup(priv);
        if (rc < 0)
                goto out;

        /* Restore the per-port link speeds backed up above */
        for (i = 0; i < SJA1105_NUM_PORTS; i++) {
                rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
                if (rc < 0)
                        goto out;
        }
out:
        return rc;
}

static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
{
        struct sja1105_mac_config_entry *mac;

        mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

        mac[port].vlanid = pvid;

        return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
                                            &mac[port], true);
}

static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
{
        struct sja1105_vlan_lookup_entry *vlan;
        int count, i;

        vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
        count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;

        for (i = 0; i < count; i++)
                if (vlan[i].vlanid == vid)
                        return i;

        /* Return an invalid entry index if not found */
        return -1;
}

static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid,
                              bool enabled, bool untagged)
{
        struct sja1105_vlan_lookup_entry *vlan;
        struct sja1105_table *table;
        bool keep = true;
        int match, rc;

        table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

        match = sja1105_is_vlan_configured(priv, vid);
        if (match < 0) {
                /* Can't delete a
missing entry. */
                if (!enabled)
                        return 0;
                rc = sja1105_table_resize(table, table->entry_count + 1);
                if (rc)
                        return rc;
                match = table->entry_count - 1;
        }
        /* Assign pointer after the resize (it's new memory) */
        vlan = table->entries;
        vlan[match].vlanid = vid;
        if (enabled) {
                vlan[match].vlan_bc |= BIT(port);
                vlan[match].vmemb_port |= BIT(port);
        } else {
                vlan[match].vlan_bc &= ~BIT(port);
                vlan[match].vmemb_port &= ~BIT(port);
        }
        /* Also unset tag_port if removing this VLAN was requested,
         * just so we don't have a confusing bitmap (no practical purpose).
         */
        if (untagged || !enabled)
                vlan[match].tag_port &= ~BIT(port);
        else
                vlan[match].tag_port |= BIT(port);
        /* If there's no port left as member of this VLAN,
         * it's time for it to go.
         */
        if (!vlan[match].vmemb_port)
                keep = false;

        dev_dbg(priv->ds->dev,
                "%s: port %d, vid %llu, broadcast domain 0x%llx, "
                "port members 0x%llx, tagged ports 0x%llx, keep %d\n",
                __func__, port, vlan[match].vlanid, vlan[match].vlan_bc,
                vlan[match].vmemb_port, vlan[match].tag_port, keep);

        rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
                                          &vlan[match], keep);
        if (rc < 0)
                return rc;

        if (!keep)
                return sja1105_table_delete_entry(table, match);

        return 0;
}

static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
{
        int rc, i;

        for (i = 0; i < SJA1105_NUM_PORTS; i++) {
                rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
                if (rc < 0) {
                        dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
                                i, rc);
                        return rc;
                }
        }
        dev_info(ds->dev, "%s switch tagging\n",
                 enabled ? "Enabled" : "Disabled");
        return 0;
}

static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port)
{
        return DSA_TAG_PROTO_SJA1105;
}

/* This callback needs to be present */
static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
                                const struct switchdev_obj_port_vlan *vlan)
{
        return 0;
}

/* The TPID setting belongs to the General Parameters table,
 * which can only be partially reconfigured at runtime (and not the TPID).
 * So a switch reset is required.
 */
static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
{
        struct sja1105_l2_lookup_params_entry *l2_lookup_params;
        struct sja1105_general_params_entry *general_params;
        struct sja1105_private *priv = ds->priv;
        struct sja1105_table *table;
        u16 tpid, tpid2;
        int rc;

        if (enabled) {
                /* Enable VLAN filtering. */
                tpid = ETH_P_8021AD;
                tpid2 = ETH_P_8021Q;
        } else {
                /* Disable VLAN filtering. */
                tpid = ETH_P_SJA1105;
                tpid2 = ETH_P_SJA1105;
        }

        table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
        general_params = table->entries;
        /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
        general_params->tpid = tpid;
        /* EtherType used to identify inner tagged (C-tag) VLAN traffic */
        general_params->tpid2 = tpid2;
        /* When VLAN filtering is on, we need to at least be able to
         * decode management traffic through the "backup plan".
         */
        general_params->incl_srcpt1 = enabled;
        general_params->incl_srcpt0 = enabled;

        /* VLAN filtering => independent VLAN learning.
         * No VLAN filtering => shared VLAN learning.
         *
         * In shared VLAN learning mode, untagged traffic still gets
         * pvid-tagged, and the FDB table gets populated with entries
         * containing the "real" (pvid or from VLAN tag) VLAN ID.
         * However the switch performs a masked L2 lookup in the FDB,
         * effectively only looking up a frame's DMAC (and not VID) for the
         * forwarding decision.
         *
         * This is extremely convenient for us, because in modes with
         * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
         * each front panel port. This is good for identification but breaks
         * learning badly - the VID of the learnt FDB entry is unique, aka
         * no frames coming from any other port are going to have it. So
         * for forwarding purposes, this is as though learning was broken
         * (all frames get flooded).
         */
        table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
        l2_lookup_params = table->entries;
        l2_lookup_params->shared_learn = !enabled;

        rc = sja1105_static_config_reload(priv);
        if (rc)
                dev_err(ds->dev, "Failed to change VLAN Ethertype\n");

        /* Switch port identification based on 802.1Q is only passable
         * if we are not under a vlan_filtering bridge. So make sure
         * the two configurations are mutually exclusive.
 */
        return sja1105_setup_8021q_tagging(ds, !enabled);
}

static void sja1105_vlan_add(struct dsa_switch *ds, int port,
                             const struct switchdev_obj_port_vlan *vlan)
{
        struct sja1105_private *priv = ds->priv;
        u16 vid;
        int rc;

        for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
                rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags &
                                        BRIDGE_VLAN_INFO_UNTAGGED);
                if (rc < 0) {
                        dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
                                vid, port, rc);
                        return;
                }
                if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
                        rc = sja1105_pvid_apply(ds->priv, port, vid);
                        if (rc < 0) {
                                dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n",
                                        vid, port, rc);
                                return;
                        }
                }
        }
}

static int sja1105_vlan_del(struct dsa_switch *ds, int port,
                            const struct switchdev_obj_port_vlan *vlan)
{
        struct sja1105_private *priv = ds->priv;
        u16 vid;
        int rc;

        for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
                rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags &
                                        BRIDGE_VLAN_INFO_UNTAGGED);
                if (rc < 0) {
                        dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
                                vid, port, rc);
                        return rc;
                }
        }
        return 0;
}

/* The programming model for the SJA1105 switch is "all-at-once" via static
 * configuration tables. Some of these can be dynamically modified at runtime,
 * but not the xMII mode parameters table.
 * Furthermore, some PHYs may not have crystals for generating their clocks
 * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
 * ref_clk pin. So port clocking needs to be initialized early, before
 * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
 * Setting correct PHY link speed does not matter now.
 * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
 * bindings are not yet parsed by DSA core. We need to parse early so that we
 * can populate the xMII mode parameters table.
 */
static int sja1105_setup(struct dsa_switch *ds)
{
        struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
        struct sja1105_private *priv = ds->priv;
        int rc;

        rc = sja1105_parse_dt(priv, ports);
        if (rc < 0) {
                dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
                return rc;
        }

        /* Error out early if internal delays are required through DT
         * and we can't apply them.
         */
        rc = sja1105_parse_rgmii_delays(priv, ports);
        if (rc < 0) {
                dev_err(ds->dev, "RGMII delay not supported\n");
                return rc;
        }

        rc = sja1105_ptp_clock_register(priv);
        if (rc < 0) {
                dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
                return rc;
        }
        /* Create and send configuration down to device */
        rc = sja1105_static_config_load(priv, ports);
        if (rc < 0) {
                dev_err(ds->dev, "Failed to load static config: %d\n", rc);
                return rc;
        }
        /* Configure the CGU (PHY link modes and speeds) */
        rc = sja1105_clocking_setup(priv);
        if (rc < 0) {
                dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
                return rc;
        }
        /* On SJA1105, VLAN filtering per se is always enabled in hardware.
         * The only thing we can do to disable it is lie about what the 802.1Q
         * EtherType is.
         * So it will still try to apply VLAN filtering, but all ingress
         * traffic (except frames received with EtherType of ETH_P_SJA1105)
         * will be internally tagged with a distorted VLAN header where the
         * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
 */
        ds->vlan_filtering_is_global = true;

        /* The DSA/switchdev model brings up switch ports in standalone mode by
         * default, and that means vlan_filtering is 0 since they're not under
         * a bridge, so it's safe to set up switch tagging at this time.
         */
        return sja1105_setup_8021q_tagging(ds, true);
}

static void sja1105_teardown(struct dsa_switch *ds)
{
        struct sja1105_private *priv = ds->priv;

        /* Stop deferred Rx timestamping work and drop any queued skbs
         * before releasing the PTP clock and the static config.
         */
        cancel_work_sync(&priv->tagger_data.rxtstamp_work);
        skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
        sja1105_ptp_clock_unregister(priv);
        sja1105_static_config_free(&priv->static_config);
}

static int sja1105_port_enable(struct dsa_switch *ds, int port,
                               struct phy_device *phy)
{
        struct net_device *slave;

        if (!dsa_is_user_port(ds, port))
                return 0;

        slave = ds->ports[port].slave;

        /* Don't advertise per-port VLAN filtering offload on the slave */
        slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;

        return 0;
}

static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
                             struct sk_buff *skb, bool takets)
{
        struct sja1105_mgmt_entry mgmt_route = {0};
        struct sja1105_private *priv = ds->priv;
        struct ethhdr *hdr;
        int timeout = 10;
        int rc;

        hdr = eth_hdr(skb);

        mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
        mgmt_route.destports = BIT(port);
        mgmt_route.enfport = 1;
        mgmt_route.tsreg = 0;
        mgmt_route.takets = takets;

        rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
                                          slot, &mgmt_route, true);
        if (rc < 0) {
                kfree_skb(skb);
                return rc;
        }

        /* Transfer skb to the host port. */
        dsa_enqueue_skb(skb, ds->ports[port].slave);

        /* Wait until the switch has processed the frame */
        do {
                rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
                                                 slot, &mgmt_route);
                if (rc < 0) {
                        dev_err_ratelimited(priv->ds->dev,
                                            "failed to poll for mgmt route\n");
                        continue;
                }

                /* UM10944: The ENFPORT flag of the respective entry is
                 * cleared when a match is found. The host can use this
                 * flag as an acknowledgment.
                 */
                cpu_relax();
        } while (mgmt_route.enfport && --timeout);

        if (!timeout) {
                /* Clean up the management route so that a follow-up
                 * frame may not match on it by mistake.
                 * This is only hardware supported on P/Q/R/S - on E/T it is
                 * a no-op and we are silently discarding the -EOPNOTSUPP.
                 */
                sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
                                             slot, &mgmt_route, false);
                dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
        }

        return NETDEV_TX_OK;
}

/* Deferred work is unfortunately necessary because setting up the management
 * route cannot be done from atomic context (SPI transfer takes a sleepable
 * lock on the bus)
 */
static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
                                              struct sk_buff *skb)
{
        struct sja1105_private *priv = ds->priv;
        struct sja1105_port *sp = &priv->ports[port];
        struct skb_shared_hwtstamps shwt = {0};
        int slot = sp->mgmt_slot;
        struct sk_buff *clone;
        u64 now, ts;
        int rc;

        /* The tragic fact about the switch having 4x2 slots for installing
         * management routes is that all of them except one are actually
         * useless.
 * If 2 slots are simultaneously configured for two BPDUs sent to the
 * same (multicast) DMAC but on different egress ports, the switch
 * would confuse them and redirect first frame it receives on the CPU
 * port towards the port configured on the numerically first slot
 * (therefore wrong port), then second received frame on second slot
 * (also wrong port).
 * So for all practical purposes, there needs to be a lock that
 * prevents that from happening. The slot used here is utterly useless
 * (could have simply been 0 just as fine), but we are doing it
 * nonetheless, in case a smarter idea ever comes up in the future.
 */
        mutex_lock(&priv->mgmt_lock);

        /* The clone, if there, was made by dsa_skb_tx_timestamp */
        clone = DSA_SKB_CB(skb)->clone;

        sja1105_mgmt_xmit(ds, port, slot, skb, !!clone);

        /* No Tx timestamp was requested for this skb */
        if (!clone)
                goto out;

        skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;

        mutex_lock(&priv->ptp_lock);

        now = priv->tstamp_cc.read(&priv->tstamp_cc);

        rc = sja1105_ptpegr_ts_poll(priv, slot, &ts);
        if (rc < 0) {
                dev_err(ds->dev, "xmit: timed out polling for tstamp\n");
                kfree_skb(clone);
                goto out_unlock_ptp;
        }

        ts = sja1105_tstamp_reconstruct(priv, now, ts);
        ts = timecounter_cyc2time(&priv->tstamp_tc, ts);

        shwt.hwtstamp = ns_to_ktime(ts);
        skb_complete_tx_timestamp(clone, &shwt);

out_unlock_ptp:
        mutex_unlock(&priv->ptp_lock);
out:
        mutex_unlock(&priv->mgmt_lock);
        return NETDEV_TX_OK;
}

/* The MAXAGE setting belongs to the L2 Lookup Parameters table (see
 * BLK_IDX_L2_LOOKUP_PARAMS below), which cannot be reconfigured at runtime.
 * So a switch reset is required.
 */
static int sja1105_set_ageing_time(struct dsa_switch *ds,
                                   unsigned int ageing_time)
{
        struct sja1105_l2_lookup_params_entry *l2_lookup_params;
        struct sja1105_private *priv = ds->priv;
        struct sja1105_table *table;
        unsigned int maxage;

        table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
        l2_lookup_params = table->entries;

        maxage = SJA1105_AGEING_TIME_MS(ageing_time);

        /* Nothing to do if the requested value is already configured */
        if (l2_lookup_params->maxage == maxage)
                return 0;

        l2_lookup_params->maxage = maxage;

        return sja1105_static_config_reload(priv);
}

/* Caller must hold priv->tagger_data.meta_lock */
static int sja1105_change_rxtstamping(struct sja1105_private *priv,
                                      bool on)
{
        struct sja1105_general_params_entry *general_params;
        struct sja1105_table *table;
        int rc;

        table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
        general_params = table->entries;
        general_params->send_meta1 = on;
        general_params->send_meta0 = on;

        rc = sja1105_init_avb_params(priv, on);
        if (rc < 0)
                return rc;

        /* Initialize the meta state machine to a known state */
        if (priv->tagger_data.stampable_skb) {
                kfree_skb(priv->tagger_data.stampable_skb);
                priv->tagger_data.stampable_skb = NULL;
        }

        return sja1105_static_config_reload(priv);
}

static int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
                                struct ifreq *ifr)
{
        struct sja1105_private *priv = ds->priv;
        struct hwtstamp_config config;
        bool rx_on;
        int rc;

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
                priv->ports[port].hwts_tx_en = false;
                break;
        case HWTSTAMP_TX_ON:
                priv->ports[port].hwts_tx_en = true;
                break;
        default:
                return -ERANGE;
        }

        switch
(config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                rx_on = false;
                break;
        default:
                /* Rx timestamping is enabled globally for any filter */
                rx_on = true;
                break;
        }

        if (rx_on != priv->tagger_data.hwts_rx_en) {
                spin_lock(&priv->tagger_data.meta_lock);
                rc = sja1105_change_rxtstamping(priv, rx_on);
                spin_unlock(&priv->tagger_data.meta_lock);
                if (rc < 0) {
                        dev_err(ds->dev,
                                "Failed to change RX timestamping: %d\n", rc);
                        return -EFAULT;
                }
                priv->tagger_data.hwts_rx_en = rx_on;
        }

        if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
                return -EFAULT;
        return 0;
}

static int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
                                struct ifreq *ifr)
{
        struct sja1105_private *priv = ds->priv;
        struct hwtstamp_config config;

        config.flags = 0;
        if (priv->ports[port].hwts_tx_en)
                config.tx_type = HWTSTAMP_TX_ON;
        else
                config.tx_type = HWTSTAMP_TX_OFF;
        if (priv->tagger_data.hwts_rx_en)
                config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
        else
                config.rx_filter = HWTSTAMP_FILTER_NONE;

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
}

#define to_tagger(d) \
        container_of((d), struct sja1105_tagger_data, rxtstamp_work)
#define to_sja1105(d) \
        container_of((d), struct sja1105_private, tagger_data)

static void sja1105_rxtstamp_work(struct work_struct *work)
{
        struct sja1105_tagger_data *data = to_tagger(work);
        struct sja1105_private *priv = to_sja1105(data);
        struct sk_buff *skb;
        u64 now;

        mutex_lock(&priv->ptp_lock);

        now = priv->tstamp_cc.read(&priv->tstamp_cc);

        while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) {
                struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
                u64 ts;

                *shwt = (struct skb_shared_hwtstamps) {0};

                ts = SJA1105_SKB_CB(skb)->meta_tstamp;
                ts = sja1105_tstamp_reconstruct(priv, now, ts);
                ts = timecounter_cyc2time(&priv->tstamp_tc, ts);

                shwt->hwtstamp = ns_to_ktime(ts);
                netif_rx_ni(skb);
        }

        mutex_unlock(&priv->ptp_lock);
}

/* Called from dsa_skb_defer_rx_timestamp */
static bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
                                  struct sk_buff *skb, unsigned int type)
{
        struct sja1105_private *priv = ds->priv;
        struct sja1105_tagger_data *data = &priv->tagger_data;

        if (!data->hwts_rx_en)
                return false;

        /* We need to read the full PTP clock to reconstruct the Rx
         * timestamp. For that we need a sleepable context.
         */
        skb_queue_tail(&data->skb_rxtstamp_queue, skb);
        schedule_work(&data->rxtstamp_work);
        return true;
}

/* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone
 * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit
 * callback, where we will timestamp it synchronously.
 */
static bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
				  struct sk_buff *skb, unsigned int type)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_port *sp = &priv->ports[port];

	/* Tx timestamping not requested on this port: no clone needed */
	if (!sp->hwts_tx_en)
		return false;

	return true;
}

static const struct dsa_switch_ops sja1105_switch_ops = {
	.get_tag_protocol = sja1105_get_tag_protocol,
	.setup = sja1105_setup,
	.teardown = sja1105_teardown,
	.set_ageing_time = sja1105_set_ageing_time,
	.phylink_validate = sja1105_phylink_validate,
	.phylink_mac_config = sja1105_mac_config,
	.phylink_mac_link_up = sja1105_mac_link_up,
	.phylink_mac_link_down = sja1105_mac_link_down,
	.get_strings = sja1105_get_strings,
	.get_ethtool_stats = sja1105_get_ethtool_stats,
	.get_sset_count = sja1105_get_sset_count,
	.get_ts_info = sja1105_get_ts_info,
	.port_enable = sja1105_port_enable,
	.port_fdb_dump = sja1105_fdb_dump,
	.port_fdb_add = sja1105_fdb_add,
	.port_fdb_del = sja1105_fdb_del,
	.port_bridge_join = sja1105_bridge_join,
	.port_bridge_leave = sja1105_bridge_leave,
	.port_stp_state_set = sja1105_bridge_stp_state_set,
	.port_vlan_prepare = sja1105_vlan_prepare,
	.port_vlan_filtering = sja1105_vlan_filtering,
	.port_vlan_add = sja1105_vlan_add,
	.port_vlan_del = sja1105_vlan_del,
	.port_mdb_prepare = sja1105_mdb_prepare,
	.port_mdb_add = sja1105_mdb_add,
	.port_mdb_del = sja1105_mdb_del,
	.port_deferred_xmit = sja1105_port_deferred_xmit,
	.port_hwtstamp_get = sja1105_hwtstamp_get,
	.port_hwtstamp_set = sja1105_hwtstamp_set,
	.port_rxtstamp = sja1105_port_rxtstamp,
	.port_txtstamp = sja1105_port_txtstamp,
};

/* Verify over SPI that the chip we are talking to matches the compatible
 * string we probed with: first the device ID register, then the part
 * number field of the product ID register.
 * Returns 0 on match, -ENODEV on mismatch, or a negative SPI error.
 */
static int sja1105_check_device_id(struct sja1105_private *priv)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
	struct device *dev = &priv->spidev->dev;
	u64 device_id;
	u64 part_no;
	int rc;

	rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id,
				  &device_id, SJA1105_SIZE_DEVICE_ID);
	if (rc < 0)
		return rc;

	if (device_id != priv->info->device_id) {
		dev_err(dev, "Expected device ID 0x%llx but read 0x%llx\n",
			priv->info->device_id, device_id);
		return -ENODEV;
	}

	rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->prod_id,
					 prod_id, SJA1105_SIZE_DEVICE_ID);
	if (rc < 0)
		return rc;

	/* Extract the 16-bit part number field (offset 19:4) from the
	 * packed product ID buffer.
	 */
	sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);

	if (part_no != priv->info->part_no) {
		dev_err(dev, "Expected part number 0x%llx but read 0x%llx\n",
			priv->info->part_no, part_no);
		return -ENODEV;
	}

	return 0;
}

static int sja1105_probe(struct spi_device *spi)
{
	struct sja1105_tagger_data *tagger_data;
	struct device *dev = &spi->dev;
	struct sja1105_private *priv;
	struct dsa_switch *ds;
	int rc, i;

	/* The port layout and PHY modes come exclusively from DT */
	if (!dev->of_node) {
		dev_err(dev, "No DTS bindings for SJA1105 driver\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Configure the optional reset pin and bring up switch */
	priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(priv->reset_gpio))
		/* NOTE(review): every gpiod error, including -EPROBE_DEFER,
		 * is treated as "no reset GPIO" here — consider
		 * devm_gpiod_get_optional() and propagating real errors.
		 */
		dev_dbg(dev, "reset-gpios not defined, ignoring\n");
	else
		/* 1 ms reset pulse, 1 ms startup delay */
		sja1105_hw_reset(priv->reset_gpio, 1, 1);

	/* Populate our driver private structure (priv) based on
	 * the device tree node that was probed (spi)
	 */
	priv->spidev = spi;
	spi_set_drvdata(spi, priv);

	/* Configure the SPI bus */
	spi->bits_per_word = 8;
	rc = spi_setup(spi);
	if (rc < 0) {
		dev_err(dev, "Could not init SPI\n");
		return rc;
	}

	/* Per-chip constants (device ID, register map) selected by the
	 * compatible string via sja1105_dt_ids below.
	 */
	priv->info = of_device_get_match_data(dev);

	/* Detect hardware device */
	rc = sja1105_check_device_id(priv);
	if (rc < 0) {
		dev_err(dev, "Device ID check failed: %d\n", rc);
		return rc;
	}

	dev_info(dev, "Probed switch chip: %s\n", priv->info->name);

	ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS);
	if (!ds)
		return -ENOMEM;

	ds->ops = &sja1105_switch_ops;
	ds->priv = priv;
	priv->ds = ds;

	/* Rx timestamping infrastructure: frames queued by
	 * sja1105_port_rxtstamp, drained by sja1105_rxtstamp_work.
	 */
	tagger_data = &priv->tagger_data;
	skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
	INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);

	/* Connections between dsa_port and sja1105_port */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		struct sja1105_port *sp = &priv->ports[i];

		ds->ports[i].priv = sp;
		sp->dp = &ds->ports[i];
		sp->data = tagger_data;
	}
	mutex_init(&priv->mgmt_lock);

	return dsa_register_switch(priv->ds);
}

/* Teardown counterpart of sja1105_probe; all other resources are devm */
static int sja1105_remove(struct spi_device *spi)
{
	struct sja1105_private *priv = spi_get_drvdata(spi);

	dsa_unregister_switch(priv->ds);
	return 0;
}

/* Each compatible maps to the per-chip constants consumed through
 * of_device_get_match_data() in sja1105_probe.
 */
static const struct of_device_id sja1105_dt_ids[] = {
	{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
	{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
	{ .compatible = "nxp,sja1105p", .data = &sja1105p_info },
	{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
	{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
	{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);

static struct spi_driver sja1105_driver = {
	.driver = {
		.name  = "sja1105",
		/* NOTE(review): spi_register_driver() (via module_spi_driver)
		 * already sets .owner; this explicit assignment is redundant.
		 */
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(sja1105_dt_ids),
	},
	.probe  = sja1105_probe,
	.remove = sja1105_remove,
};

module_spi_driver(sja1105_driver);

MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
MODULE_DESCRIPTION("SJA1105 Driver");
MODULE_LICENSE("GPL v2");