1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH 3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> 4 */ 5 6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 8 #include <linux/delay.h> 9 #include <linux/module.h> 10 #include <linux/printk.h> 11 #include <linux/spi/spi.h> 12 #include <linux/errno.h> 13 #include <linux/gpio/consumer.h> 14 #include <linux/phylink.h> 15 #include <linux/of.h> 16 #include <linux/of_net.h> 17 #include <linux/of_mdio.h> 18 #include <linux/of_device.h> 19 #include <linux/netdev_features.h> 20 #include <linux/netdevice.h> 21 #include <linux/if_bridge.h> 22 #include <linux/if_ether.h> 23 #include <linux/dsa/8021q.h> 24 #include "sja1105.h" 25 #include "sja1105_sgmii.h" 26 #include "sja1105_tas.h" 27 28 static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len, 29 unsigned int startup_delay) 30 { 31 gpiod_set_value_cansleep(gpio, 1); 32 /* Wait for minimum reset pulse length */ 33 msleep(pulse_len); 34 gpiod_set_value_cansleep(gpio, 0); 35 /* Wait until chip is ready after reset */ 36 msleep(startup_delay); 37 } 38 39 static void 40 sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd, 41 int from, int to, bool allow) 42 { 43 if (allow) { 44 l2_fwd[from].bc_domain |= BIT(to); 45 l2_fwd[from].reach_port |= BIT(to); 46 l2_fwd[from].fl_domain |= BIT(to); 47 } else { 48 l2_fwd[from].bc_domain &= ~BIT(to); 49 l2_fwd[from].reach_port &= ~BIT(to); 50 l2_fwd[from].fl_domain &= ~BIT(to); 51 } 52 } 53 54 /* Structure used to temporarily transport device tree 55 * settings into sja1105_setup 56 */ 57 struct sja1105_dt_port { 58 phy_interface_t phy_mode; 59 sja1105_mii_role_t role; 60 }; 61 62 static int sja1105_init_mac_settings(struct sja1105_private *priv) 63 { 64 struct sja1105_mac_config_entry default_mac = { 65 /* Enable all 8 priority queues on egress. 66 * Every queue i holds top[i] - base[i] frames. 67 * Sum of top[i] - base[i] is 511 (max hardware limit). 68 */ 69 .top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF}, 70 .base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0}, 71 .enabled = {true, true, true, true, true, true, true, true}, 72 /* Keep standard IFG of 12 bytes on egress. */ 73 .ifg = 0, 74 /* Always put the MAC speed in automatic mode, where it can be 75 * adjusted at runtime by PHYLINK. 76 */ 77 .speed = SJA1105_SPEED_AUTO, 78 /* No static correction for 1-step 1588 events */ 79 .tp_delin = 0, 80 .tp_delout = 0, 81 /* Disable aging for critical TTEthernet traffic */ 82 .maxage = 0xFF, 83 /* Internal VLAN (pvid) to apply to untagged ingress */ 84 .vlanprio = 0, 85 .vlanid = 1, 86 .ing_mirr = false, 87 .egr_mirr = false, 88 /* Don't drop traffic with other EtherType than ETH_P_IP */ 89 .drpnona664 = false, 90 /* Don't drop double-tagged traffic */ 91 .drpdtag = false, 92 /* Don't drop untagged traffic */ 93 .drpuntag = false, 94 /* Don't retag 802.1p (VID 0) traffic with the pvid */ 95 .retag = false, 96 /* Disable learning and I/O on user ports by default - 97 * STP will enable it. 98 */ 99 .dyn_learn = false, 100 .egress = false, 101 .ingress = false, 102 }; 103 struct sja1105_mac_config_entry *mac; 104 struct sja1105_table *table; 105 int i; 106 107 table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG]; 108 109 /* Discard previous MAC Configuration Table */ 110 if (table->entry_count) { 111 kfree(table->entries); 112 table->entry_count = 0; 113 } 114 115 table->entries = kcalloc(SJA1105_NUM_PORTS, 116 table->ops->unpacked_entry_size, GFP_KERNEL); 117 if (!table->entries) 118 return -ENOMEM; 119 120 table->entry_count = SJA1105_NUM_PORTS; 121 122 mac = table->entries; 123 124 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 125 mac[i] = default_mac; 126 if (i == dsa_upstream_port(priv->ds, i)) { 127 /* STP doesn't get called for CPU port, so we need to 128 * set the I/O parameters statically. 129 */ 130 mac[i].dyn_learn = true; 131 mac[i].ingress = true; 132 mac[i].egress = true; 133 } 134 } 135 136 return 0; 137 } 138 139 static bool sja1105_supports_sgmii(struct sja1105_private *priv, int port) 140 { 141 if (priv->info->part_no != SJA1105R_PART_NO && 142 priv->info->part_no != SJA1105S_PART_NO) 143 return false; 144 145 if (port != SJA1105_SGMII_PORT) 146 return false; 147 148 if (dsa_is_unused_port(priv->ds, port)) 149 return false; 150 151 return true; 152 } 153 154 static int sja1105_init_mii_settings(struct sja1105_private *priv, 155 struct sja1105_dt_port *ports) 156 { 157 struct device *dev = &priv->spidev->dev; 158 struct sja1105_xmii_params_entry *mii; 159 struct sja1105_table *table; 160 int i; 161 162 table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS]; 163 164 /* Discard previous xMII Mode Parameters Table */ 165 if (table->entry_count) { 166 kfree(table->entries); 167 table->entry_count = 0; 168 } 169 170 table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT, 171 table->ops->unpacked_entry_size, GFP_KERNEL); 172 if (!table->entries) 173 return -ENOMEM; 174 175 /* Override table based on PHYLINK DT bindings */ 176 table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT; 177 178 mii = table->entries; 179 180 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 181 if (dsa_is_unused_port(priv->ds, i)) 182 continue; 183 184 switch (ports[i].phy_mode) { 185 case PHY_INTERFACE_MODE_MII: 186 mii->xmii_mode[i] = XMII_MODE_MII; 187 break; 188 case PHY_INTERFACE_MODE_RMII: 189 mii->xmii_mode[i] = XMII_MODE_RMII; 190 break; 191 case PHY_INTERFACE_MODE_RGMII: 192 case PHY_INTERFACE_MODE_RGMII_ID: 193 case PHY_INTERFACE_MODE_RGMII_RXID: 194 case PHY_INTERFACE_MODE_RGMII_TXID: 195 mii->xmii_mode[i] = XMII_MODE_RGMII; 196 break; 197 case PHY_INTERFACE_MODE_SGMII: 198 if (!sja1105_supports_sgmii(priv, i)) 199 return -EINVAL; 200 mii->xmii_mode[i] = XMII_MODE_SGMII; 201 break; 202 default: 203 dev_err(dev, "Unsupported PHY mode %s!\n", 204 phy_modes(ports[i].phy_mode)); 205 } 206 207 /* Even though the SerDes port is able to drive SGMII autoneg 208 * like a PHY would, from the perspective of the XMII tables, 209 * the SGMII port should always be put in MAC mode. 210 */ 211 if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII) 212 mii->phy_mac[i] = XMII_MAC; 213 else 214 mii->phy_mac[i] = ports[i].role; 215 } 216 return 0; 217 } 218 219 static int sja1105_init_static_fdb(struct sja1105_private *priv) 220 { 221 struct sja1105_table *table; 222 223 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; 224 225 /* We only populate the FDB table through dynamic 226 * L2 Address Lookup entries 227 */ 228 if (table->entry_count) { 229 kfree(table->entries); 230 table->entry_count = 0; 231 } 232 return 0; 233 } 234 235 static int sja1105_init_l2_lookup_params(struct sja1105_private *priv) 236 { 237 struct sja1105_table *table; 238 u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS; 239 struct sja1105_l2_lookup_params_entry default_l2_lookup_params = { 240 /* Learned FDB entries are forgotten after 300 seconds */ 241 .maxage = SJA1105_AGEING_TIME_MS(300000), 242 /* All entries within a FDB bin are available for learning */ 243 .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE, 244 /* And the P/Q/R/S equivalent setting: */ 245 .start_dynspc = 0, 246 .maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries, 247 max_fdb_entries, max_fdb_entries, }, 248 /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */ 249 .poly = 0x97, 250 /* This selects between Independent VLAN Learning (IVL) and 251 * Shared VLAN Learning (SVL) 252 */ 253 .shared_learn = true, 254 /* Don't discard management traffic based on ENFPORT - 255 * we don't perform SMAC port enforcement anyway, so 256 * what we are setting here doesn't matter. 257 */ 258 .no_enf_hostprt = false, 259 /* Don't learn SMAC for mac_fltres1 and mac_fltres0. 260 * Maybe correlate with no_linklocal_learn from bridge driver? 261 */ 262 .no_mgmt_learn = true, 263 /* P/Q/R/S only */ 264 .use_static = true, 265 /* Dynamically learned FDB entries can overwrite other (older) 266 * dynamic FDB entries 267 */ 268 .owr_dyn = true, 269 .drpnolearn = true, 270 }; 271 272 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; 273 274 if (table->entry_count) { 275 kfree(table->entries); 276 table->entry_count = 0; 277 } 278 279 table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT, 280 table->ops->unpacked_entry_size, GFP_KERNEL); 281 if (!table->entries) 282 return -ENOMEM; 283 284 table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT; 285 286 /* This table only has a single entry */ 287 ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] = 288 default_l2_lookup_params; 289 290 return 0; 291 } 292 293 static int sja1105_init_static_vlan(struct sja1105_private *priv) 294 { 295 struct sja1105_table *table; 296 struct sja1105_vlan_lookup_entry pvid = { 297 .ving_mirr = 0, 298 .vegr_mirr = 0, 299 .vmemb_port = 0, 300 .vlan_bc = 0, 301 .tag_port = 0, 302 .vlanid = 1, 303 }; 304 int i; 305 306 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; 307 308 /* The static VLAN table will only contain the initial pvid of 1. 309 * All other VLANs are to be configured through dynamic entries, 310 * and kept in the static configuration table as backing memory. 311 */ 312 if (table->entry_count) { 313 kfree(table->entries); 314 table->entry_count = 0; 315 } 316 317 table->entries = kcalloc(1, table->ops->unpacked_entry_size, 318 GFP_KERNEL); 319 if (!table->entries) 320 return -ENOMEM; 321 322 table->entry_count = 1; 323 324 /* VLAN 1: all DT-defined ports are members; no restrictions on 325 * forwarding; always transmit priority-tagged frames as untagged. 326 */ 327 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 328 pvid.vmemb_port |= BIT(i); 329 pvid.vlan_bc |= BIT(i); 330 pvid.tag_port &= ~BIT(i); 331 } 332 333 ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid; 334 return 0; 335 } 336 337 static int sja1105_init_l2_forwarding(struct sja1105_private *priv) 338 { 339 struct sja1105_l2_forwarding_entry *l2fwd; 340 struct sja1105_table *table; 341 int i, j; 342 343 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING]; 344 345 if (table->entry_count) { 346 kfree(table->entries); 347 table->entry_count = 0; 348 } 349 350 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT, 351 table->ops->unpacked_entry_size, GFP_KERNEL); 352 if (!table->entries) 353 return -ENOMEM; 354 355 table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT; 356 357 l2fwd = table->entries; 358 359 /* First 5 entries define the forwarding rules */ 360 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 361 unsigned int upstream = dsa_upstream_port(priv->ds, i); 362 363 for (j = 0; j < SJA1105_NUM_TC; j++) 364 l2fwd[i].vlan_pmap[j] = j; 365 366 if (i == upstream) 367 continue; 368 369 sja1105_port_allow_traffic(l2fwd, i, upstream, true); 370 sja1105_port_allow_traffic(l2fwd, upstream, i, true); 371 } 372 /* Next 8 entries define VLAN PCP mapping from ingress to egress. 373 * Create a one-to-one mapping. 374 */ 375 for (i = 0; i < SJA1105_NUM_TC; i++) 376 for (j = 0; j < SJA1105_NUM_PORTS; j++) 377 l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i; 378 379 return 0; 380 } 381 382 static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv) 383 { 384 struct sja1105_l2_forwarding_params_entry default_l2fwd_params = { 385 /* Disallow dynamic reconfiguration of vlan_pmap */ 386 .max_dynp = 0, 387 /* Use a single memory partition for all ingress queues */ 388 .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 }, 389 }; 390 struct sja1105_table *table; 391 392 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; 393 394 if (table->entry_count) { 395 kfree(table->entries); 396 table->entry_count = 0; 397 } 398 399 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT, 400 table->ops->unpacked_entry_size, GFP_KERNEL); 401 if (!table->entries) 402 return -ENOMEM; 403 404 table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT; 405 406 /* This table only has a single entry */ 407 ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] = 408 default_l2fwd_params; 409 410 return 0; 411 } 412 413 static int sja1105_init_general_params(struct sja1105_private *priv) 414 { 415 struct sja1105_general_params_entry default_general_params = { 416 /* Allow dynamic changing of the mirror port */ 417 .mirr_ptacu = true, 418 .switchid = priv->ds->index, 419 /* Priority queue for link-local management frames 420 * (both ingress to and egress from CPU - PTP, STP etc) 421 */ 422 .hostprio = 7, 423 .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A, 424 .mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK, 425 .incl_srcpt1 = false, 426 .send_meta1 = false, 427 .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B, 428 .mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK, 429 .incl_srcpt0 = false, 430 .send_meta0 = false, 431 /* The destination for traffic matching mac_fltres1 and 432 * mac_fltres0 on all ports except host_port. Such traffic 433 * receieved on host_port itself would be dropped, except 434 * by installing a temporary 'management route' 435 */ 436 .host_port = dsa_upstream_port(priv->ds, 0), 437 /* Default to an invalid value */ 438 .mirr_port = SJA1105_NUM_PORTS, 439 /* Link-local traffic received on casc_port will be forwarded 440 * to host_port without embedding the source port and device ID 441 * info in the destination MAC address (presumably because it 442 * is a cascaded port and a downstream SJA switch already did 443 * that). Default to an invalid port (to disable the feature) 444 * and overwrite this if we find any DSA (cascaded) ports. 445 */ 446 .casc_port = SJA1105_NUM_PORTS, 447 /* No TTEthernet */ 448 .vllupformat = 0, 449 .vlmarker = 0, 450 .vlmask = 0, 451 /* Only update correctionField for 1-step PTP (L2 transport) */ 452 .ignore2stf = 0, 453 /* Forcefully disable VLAN filtering by telling 454 * the switch that VLAN has a different EtherType. 455 */ 456 .tpid = ETH_P_SJA1105, 457 .tpid2 = ETH_P_SJA1105, 458 }; 459 struct sja1105_table *table; 460 461 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; 462 463 if (table->entry_count) { 464 kfree(table->entries); 465 table->entry_count = 0; 466 } 467 468 table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT, 469 table->ops->unpacked_entry_size, GFP_KERNEL); 470 if (!table->entries) 471 return -ENOMEM; 472 473 table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT; 474 475 /* This table only has a single entry */ 476 ((struct sja1105_general_params_entry *)table->entries)[0] = 477 default_general_params; 478 479 return 0; 480 } 481 482 static int sja1105_init_avb_params(struct sja1105_private *priv) 483 { 484 struct sja1105_avb_params_entry *avb; 485 struct sja1105_table *table; 486 487 table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS]; 488 489 /* Discard previous AVB Parameters Table */ 490 if (table->entry_count) { 491 kfree(table->entries); 492 table->entry_count = 0; 493 } 494 495 table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT, 496 table->ops->unpacked_entry_size, GFP_KERNEL); 497 if (!table->entries) 498 return -ENOMEM; 499 500 table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT; 501 502 avb = table->entries; 503 504 /* Configure the MAC addresses for meta frames */ 505 avb->destmeta = SJA1105_META_DMAC; 506 avb->srcmeta = SJA1105_META_SMAC; 507 /* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by 508 * default. This is because there might be boards with a hardware 509 * layout where enabling the pin as output might cause an electrical 510 * clash. On E/T the pin is always an output, which the board designers 511 * probably already knew, so even if there are going to be electrical 512 * issues, there's nothing we can do. 513 */ 514 avb->cas_master = false; 515 516 return 0; 517 } 518 519 /* The L2 policing table is 2-stage. The table is looked up for each frame 520 * according to the ingress port, whether it was broadcast or not, and the 521 * classified traffic class (given by VLAN PCP). This portion of the lookup is 522 * fixed, and gives access to the SHARINDX, an indirection register pointing 523 * within the policing table itself, which is used to resolve the policer that 524 * will be used for this frame. 525 * 526 * Stage 1 Stage 2 527 * +------------+--------+ +---------------------------------+ 528 * |Port 0 TC 0 |SHARINDX| | Policer 0: Rate, Burst, MTU | 529 * +------------+--------+ +---------------------------------+ 530 * |Port 0 TC 1 |SHARINDX| | Policer 1: Rate, Burst, MTU | 531 * +------------+--------+ +---------------------------------+ 532 * ... | Policer 2: Rate, Burst, MTU | 533 * +------------+--------+ +---------------------------------+ 534 * |Port 0 TC 7 |SHARINDX| | Policer 3: Rate, Burst, MTU | 535 * +------------+--------+ +---------------------------------+ 536 * |Port 1 TC 0 |SHARINDX| | Policer 4: Rate, Burst, MTU | 537 * +------------+--------+ +---------------------------------+ 538 * ... | Policer 5: Rate, Burst, MTU | 539 * +------------+--------+ +---------------------------------+ 540 * |Port 1 TC 7 |SHARINDX| | Policer 6: Rate, Burst, MTU | 541 * +------------+--------+ +---------------------------------+ 542 * ... | Policer 7: Rate, Burst, MTU | 543 * +------------+--------+ +---------------------------------+ 544 * |Port 4 TC 7 |SHARINDX| ... 545 * +------------+--------+ 546 * |Port 0 BCAST|SHARINDX| ... 547 * +------------+--------+ 548 * |Port 1 BCAST|SHARINDX| ... 549 * +------------+--------+ 550 * ... ... 551 * +------------+--------+ +---------------------------------+ 552 * |Port 4 BCAST|SHARINDX| | Policer 44: Rate, Burst, MTU | 553 * +------------+--------+ +---------------------------------+ 554 * 555 * In this driver, we shall use policers 0-4 as statically alocated port 556 * (matchall) policers. So we need to make the SHARINDX for all lookups 557 * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast 558 * lookup) equal. 559 * The remaining policers (40) shall be dynamically allocated for flower 560 * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff. 561 */ 562 #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000) 563 564 static int sja1105_init_l2_policing(struct sja1105_private *priv) 565 { 566 struct sja1105_l2_policing_entry *policing; 567 struct sja1105_table *table; 568 int port, tc; 569 570 table = &priv->static_config.tables[BLK_IDX_L2_POLICING]; 571 572 /* Discard previous L2 Policing Table */ 573 if (table->entry_count) { 574 kfree(table->entries); 575 table->entry_count = 0; 576 } 577 578 table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT, 579 table->ops->unpacked_entry_size, GFP_KERNEL); 580 if (!table->entries) 581 return -ENOMEM; 582 583 table->entry_count = SJA1105_MAX_L2_POLICING_COUNT; 584 585 policing = table->entries; 586 587 /* Setup shared indices for the matchall policers */ 588 for (port = 0; port < SJA1105_NUM_PORTS; port++) { 589 int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port; 590 591 for (tc = 0; tc < SJA1105_NUM_TC; tc++) 592 policing[port * SJA1105_NUM_TC + tc].sharindx = port; 593 594 policing[bcast].sharindx = port; 595 } 596 597 /* Setup the matchall policer parameters */ 598 for (port = 0; port < SJA1105_NUM_PORTS; port++) { 599 int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; 600 601 if (dsa_is_cpu_port(priv->ds, port)) 602 mtu += VLAN_HLEN; 603 604 policing[port].smax = 65535; /* Burst size in bytes */ 605 policing[port].rate = SJA1105_RATE_MBPS(1000); 606 policing[port].maxlen = mtu; 607 policing[port].partition = 0; 608 } 609 610 return 0; 611 } 612 613 static int sja1105_static_config_load(struct sja1105_private *priv, 614 struct sja1105_dt_port *ports) 615 { 616 int rc; 617 618 sja1105_static_config_free(&priv->static_config); 619 rc = sja1105_static_config_init(&priv->static_config, 620 priv->info->static_ops, 621 priv->info->device_id); 622 if (rc) 623 return rc; 624 625 /* Build static configuration */ 626 rc = sja1105_init_mac_settings(priv); 627 if (rc < 0) 628 return rc; 629 rc = sja1105_init_mii_settings(priv, ports); 630 if (rc < 0) 631 return rc; 632 rc = sja1105_init_static_fdb(priv); 633 if (rc < 0) 634 return rc; 635 rc = sja1105_init_static_vlan(priv); 636 if (rc < 0) 637 return rc; 638 rc = sja1105_init_l2_lookup_params(priv); 639 if (rc < 0) 640 return rc; 641 rc = sja1105_init_l2_forwarding(priv); 642 if (rc < 0) 643 return rc; 644 rc = sja1105_init_l2_forwarding_params(priv); 645 if (rc < 0) 646 return rc; 647 rc = sja1105_init_l2_policing(priv); 648 if (rc < 0) 649 return rc; 650 rc = sja1105_init_general_params(priv); 651 if (rc < 0) 652 return rc; 653 rc = sja1105_init_avb_params(priv); 654 if (rc < 0) 655 return rc; 656 657 /* Send initial configuration to hardware via SPI */ 658 return sja1105_static_config_upload(priv); 659 } 660 661 static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, 662 const struct sja1105_dt_port *ports) 663 { 664 int i; 665 666 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 667 if (ports[i].role == XMII_MAC) 668 continue; 669 670 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID || 671 ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID) 672 priv->rgmii_rx_delay[i] = true; 673 674 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID || 675 ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID) 676 priv->rgmii_tx_delay[i] = true; 677 678 if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) && 679 !priv->info->setup_rgmii_delay) 680 return -EINVAL; 681 } 682 return 0; 683 } 684 685 static int sja1105_parse_ports_node(struct sja1105_private *priv, 686 struct sja1105_dt_port *ports, 687 struct device_node *ports_node) 688 { 689 struct device *dev = &priv->spidev->dev; 690 struct device_node *child; 691 692 for_each_available_child_of_node(ports_node, child) { 693 struct device_node *phy_node; 694 phy_interface_t phy_mode; 695 u32 index; 696 int err; 697 698 /* Get switch port number from DT */ 699 if (of_property_read_u32(child, "reg", &index) < 0) { 700 dev_err(dev, "Port number not defined in device tree " 701 "(property \"reg\")\n"); 702 of_node_put(child); 703 return -ENODEV; 704 } 705 706 /* Get PHY mode from DT */ 707 err = of_get_phy_mode(child, &phy_mode); 708 if (err) { 709 dev_err(dev, "Failed to read phy-mode or " 710 "phy-interface-type property for port %d\n", 711 index); 712 of_node_put(child); 713 return -ENODEV; 714 } 715 ports[index].phy_mode = phy_mode; 716 717 phy_node = of_parse_phandle(child, "phy-handle", 0); 718 if (!phy_node) { 719 if (!of_phy_is_fixed_link(child)) { 720 dev_err(dev, "phy-handle or fixed-link " 721 "properties missing!\n"); 722 of_node_put(child); 723 return -ENODEV; 724 } 725 /* phy-handle is missing, but fixed-link isn't. 726 * So it's a fixed link. Default to PHY role. 727 */ 728 ports[index].role = XMII_PHY; 729 } else { 730 /* phy-handle present => put port in MAC role */ 731 ports[index].role = XMII_MAC; 732 of_node_put(phy_node); 733 } 734 735 /* The MAC/PHY role can be overridden with explicit bindings */ 736 if (of_property_read_bool(child, "sja1105,role-mac")) 737 ports[index].role = XMII_MAC; 738 else if (of_property_read_bool(child, "sja1105,role-phy")) 739 ports[index].role = XMII_PHY; 740 } 741 742 return 0; 743 } 744 745 static int sja1105_parse_dt(struct sja1105_private *priv, 746 struct sja1105_dt_port *ports) 747 { 748 struct device *dev = &priv->spidev->dev; 749 struct device_node *switch_node = dev->of_node; 750 struct device_node *ports_node; 751 int rc; 752 753 ports_node = of_get_child_by_name(switch_node, "ports"); 754 if (!ports_node) { 755 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n"); 756 return -ENODEV; 757 } 758 759 rc = sja1105_parse_ports_node(priv, ports, ports_node); 760 of_node_put(ports_node); 761 762 return rc; 763 } 764 765 static int sja1105_sgmii_read(struct sja1105_private *priv, int pcs_reg) 766 { 767 const struct sja1105_regs *regs = priv->info->regs; 768 u32 val; 769 int rc; 770 771 rc = sja1105_xfer_u32(priv, SPI_READ, regs->sgmii + pcs_reg, &val, 772 NULL); 773 if (rc < 0) 774 return rc; 775 776 return val; 777 } 778 779 static int sja1105_sgmii_write(struct sja1105_private *priv, int pcs_reg, 780 u16 pcs_val) 781 { 782 const struct sja1105_regs *regs = priv->info->regs; 783 u32 val = pcs_val; 784 int rc; 785 786 rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->sgmii + pcs_reg, &val, 787 NULL); 788 if (rc < 0) 789 return rc; 790 791 return val; 792 } 793 794 static void sja1105_sgmii_pcs_config(struct sja1105_private *priv, 795 bool an_enabled, bool an_master) 796 { 797 u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII; 798 799 /* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to 800 * stop the clock during LPI mode, make the MAC reconfigure 801 * autonomously after PCS autoneg is done, flush the internal FIFOs. 802 */ 803 sja1105_sgmii_write(priv, SJA1105_DC1, SJA1105_DC1_EN_VSMMD1 | 804 SJA1105_DC1_CLOCK_STOP_EN | 805 SJA1105_DC1_MAC_AUTO_SW | 806 SJA1105_DC1_INIT); 807 /* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */ 808 sja1105_sgmii_write(priv, SJA1105_DC2, SJA1105_DC2_TX_POL_INV_DISABLE); 809 /* AUTONEG_CONTROL: Use SGMII autoneg */ 810 if (an_master) 811 ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK; 812 sja1105_sgmii_write(priv, SJA1105_AC, ac); 813 /* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise, 814 * sja1105_sgmii_pcs_force_speed must be called later for the link 815 * to become operational. 816 */ 817 if (an_enabled) 818 sja1105_sgmii_write(priv, MII_BMCR, 819 BMCR_ANENABLE | BMCR_ANRESTART); 820 } 821 822 static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv, 823 int speed) 824 { 825 int pcs_speed; 826 827 switch (speed) { 828 case SPEED_1000: 829 pcs_speed = BMCR_SPEED1000; 830 break; 831 case SPEED_100: 832 pcs_speed = BMCR_SPEED100; 833 break; 834 case SPEED_10: 835 pcs_speed = BMCR_SPEED10; 836 break; 837 default: 838 dev_err(priv->ds->dev, "Invalid speed %d\n", speed); 839 return; 840 } 841 sja1105_sgmii_write(priv, MII_BMCR, pcs_speed | BMCR_FULLDPLX); 842 } 843 844 /* Convert link speed from SJA1105 to ethtool encoding */ 845 static int sja1105_speed[] = { 846 [SJA1105_SPEED_AUTO] = SPEED_UNKNOWN, 847 [SJA1105_SPEED_10MBPS] = SPEED_10, 848 [SJA1105_SPEED_100MBPS] = SPEED_100, 849 [SJA1105_SPEED_1000MBPS] = SPEED_1000, 850 }; 851 852 /* Set link speed in the MAC configuration for a specific port. */ 853 static int sja1105_adjust_port_config(struct sja1105_private *priv, int port, 854 int speed_mbps) 855 { 856 struct sja1105_xmii_params_entry *mii; 857 struct sja1105_mac_config_entry *mac; 858 struct device *dev = priv->ds->dev; 859 sja1105_phy_interface_t phy_mode; 860 sja1105_speed_t speed; 861 int rc; 862 863 /* On P/Q/R/S, one can read from the device via the MAC reconfiguration 864 * tables. On E/T, MAC reconfig tables are not readable, only writable. 865 * We have to *know* what the MAC looks like. For the sake of keeping 866 * the code common, we'll use the static configuration tables as a 867 * reasonable approximation for both E/T and P/Q/R/S. 868 */ 869 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 870 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; 871 872 switch (speed_mbps) { 873 case SPEED_UNKNOWN: 874 /* PHYLINK called sja1105_mac_config() to inform us about 875 * the state->interface, but AN has not completed and the 876 * speed is not yet valid. UM10944.pdf says that setting 877 * SJA1105_SPEED_AUTO at runtime disables the port, so that is 878 * ok for power consumption in case AN will never complete - 879 * otherwise PHYLINK should come back with a new update. 880 */ 881 speed = SJA1105_SPEED_AUTO; 882 break; 883 case SPEED_10: 884 speed = SJA1105_SPEED_10MBPS; 885 break; 886 case SPEED_100: 887 speed = SJA1105_SPEED_100MBPS; 888 break; 889 case SPEED_1000: 890 speed = SJA1105_SPEED_1000MBPS; 891 break; 892 default: 893 dev_err(dev, "Invalid speed %iMbps\n", speed_mbps); 894 return -EINVAL; 895 } 896 897 /* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration 898 * table, since this will be used for the clocking setup, and we no 899 * longer need to store it in the static config (already told hardware 900 * we want auto during upload phase). 901 * Actually for the SGMII port, the MAC is fixed at 1 Gbps and 902 * we need to configure the PCS only (if even that). 903 */ 904 if (sja1105_supports_sgmii(priv, port)) 905 mac[port].speed = SJA1105_SPEED_1000MBPS; 906 else 907 mac[port].speed = speed; 908 909 /* Write to the dynamic reconfiguration tables */ 910 rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, 911 &mac[port], true); 912 if (rc < 0) { 913 dev_err(dev, "Failed to write MAC config: %d\n", rc); 914 return rc; 915 } 916 917 /* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at 918 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and 919 * RMII no change of the clock setup is required. Actually, changing 920 * the clock setup does interrupt the clock signal for a certain time 921 * which causes trouble for all PHYs relying on this signal. 922 */ 923 phy_mode = mii->xmii_mode[port]; 924 if (phy_mode != XMII_MODE_RGMII) 925 return 0; 926 927 return sja1105_clocking_setup_port(priv, port); 928 } 929 930 /* The SJA1105 MAC programming model is through the static config (the xMII 931 * Mode table cannot be dynamically reconfigured), and we have to program 932 * that early (earlier than PHYLINK calls us, anyway). 933 * So just error out in case the connected PHY attempts to change the initial 934 * system interface MII protocol from what is defined in the DT, at least for 935 * now. 936 */ 937 static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port, 938 phy_interface_t interface) 939 { 940 struct sja1105_xmii_params_entry *mii; 941 sja1105_phy_interface_t phy_mode; 942 943 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; 944 phy_mode = mii->xmii_mode[port]; 945 946 switch (interface) { 947 case PHY_INTERFACE_MODE_MII: 948 return (phy_mode != XMII_MODE_MII); 949 case PHY_INTERFACE_MODE_RMII: 950 return (phy_mode != XMII_MODE_RMII); 951 case PHY_INTERFACE_MODE_RGMII: 952 case PHY_INTERFACE_MODE_RGMII_ID: 953 case PHY_INTERFACE_MODE_RGMII_RXID: 954 case PHY_INTERFACE_MODE_RGMII_TXID: 955 return (phy_mode != XMII_MODE_RGMII); 956 case PHY_INTERFACE_MODE_SGMII: 957 return (phy_mode != XMII_MODE_SGMII); 958 default: 959 return true; 960 } 961 } 962 963 static void sja1105_mac_config(struct dsa_switch *ds, int port, 964 unsigned int mode, 965 const struct phylink_link_state *state) 966 { 967 struct sja1105_private *priv = ds->priv; 968 bool is_sgmii = sja1105_supports_sgmii(priv, port); 969 970 if (sja1105_phy_mode_mismatch(priv, port, state->interface)) { 971 dev_err(ds->dev, "Changing PHY mode to %s not supported!\n", 972 phy_modes(state->interface)); 973 return; 974 } 975 976 if (phylink_autoneg_inband(mode) && !is_sgmii) { 977 dev_err(ds->dev, "In-band AN not supported!\n"); 978 return; 979 } 980 981 if (is_sgmii) 982 sja1105_sgmii_pcs_config(priv, phylink_autoneg_inband(mode), 983 false); 984 } 985 986 static void sja1105_mac_link_down(struct dsa_switch *ds, int port, 987 unsigned int mode, 988 phy_interface_t interface) 989 { 990 sja1105_inhibit_tx(ds->priv, BIT(port), true); 991 } 992 993 static void sja1105_mac_link_up(struct dsa_switch *ds, int port, 994 unsigned int mode, 995 phy_interface_t interface, 996 struct phy_device *phydev, 997 int speed, int duplex, 998 bool tx_pause, bool rx_pause) 999 { 1000 struct sja1105_private *priv = ds->priv; 1001 1002 sja1105_adjust_port_config(priv, port, speed); 1003 1004 if (sja1105_supports_sgmii(priv, port) && !phylink_autoneg_inband(mode)) 1005 sja1105_sgmii_pcs_force_speed(priv, speed); 1006 1007 sja1105_inhibit_tx(priv, BIT(port), false); 1008 } 1009 1010 static void sja1105_phylink_validate(struct dsa_switch *ds, int port, 1011 unsigned long *supported, 1012 struct phylink_link_state *state) 1013 { 1014 /* Construct a new mask which exhaustively contains all link features 1015 * supported by the MAC, and then apply that (logical AND) to what will 1016 * be sent to the PHY for "marketing". 1017 */ 1018 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 1019 struct sja1105_private *priv = ds->priv; 1020 struct sja1105_xmii_params_entry *mii; 1021 1022 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; 1023 1024 /* include/linux/phylink.h says: 1025 * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink 1026 * expects the MAC driver to return all supported link modes. 1027 */ 1028 if (state->interface != PHY_INTERFACE_MODE_NA && 1029 sja1105_phy_mode_mismatch(priv, port, state->interface)) { 1030 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 1031 return; 1032 } 1033 1034 /* The MAC does not support pause frames, and also doesn't 1035 * support half-duplex traffic modes. 1036 */ 1037 phylink_set(mask, Autoneg); 1038 phylink_set(mask, MII); 1039 phylink_set(mask, 10baseT_Full); 1040 phylink_set(mask, 100baseT_Full); 1041 phylink_set(mask, 100baseT1_Full); 1042 if (mii->xmii_mode[port] == XMII_MODE_RGMII || 1043 mii->xmii_mode[port] == XMII_MODE_SGMII) 1044 phylink_set(mask, 1000baseT_Full); 1045 1046 bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); 1047 bitmap_and(state->advertising, state->advertising, mask, 1048 __ETHTOOL_LINK_MODE_MASK_NBITS); 1049 } 1050 1051 static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port, 1052 struct phylink_link_state *state) 1053 { 1054 struct sja1105_private *priv = ds->priv; 1055 int ais; 1056 1057 /* Read the vendor-specific AUTONEG_INTR_STATUS register */ 1058 ais = sja1105_sgmii_read(priv, SJA1105_AIS); 1059 if (ais < 0) 1060 return ais; 1061 1062 switch (SJA1105_AIS_SPEED(ais)) { 1063 case 0: 1064 state->speed = SPEED_10; 1065 break; 1066 case 1: 1067 state->speed = SPEED_100; 1068 break; 1069 case 2: 1070 state->speed = SPEED_1000; 1071 break; 1072 default: 1073 dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n", 1074 SJA1105_AIS_SPEED(ais)); 1075 } 1076 state->duplex = SJA1105_AIS_DUPLEX_MODE(ais); 1077 state->an_complete = SJA1105_AIS_COMPLETE(ais); 1078 state->link = SJA1105_AIS_LINK_STATUS(ais); 1079 1080 return 0; 1081 } 1082 1083 static int 1084 sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port, 1085 const struct sja1105_l2_lookup_entry *requested) 1086 { 1087 struct sja1105_l2_lookup_entry *l2_lookup; 1088 struct sja1105_table *table; 1089 int i; 1090 1091 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; 1092 l2_lookup = table->entries; 1093 1094 for (i = 0; i < table->entry_count; i++) 1095 if (l2_lookup[i].macaddr == requested->macaddr && 1096 l2_lookup[i].vlanid == requested->vlanid && 1097 l2_lookup[i].destports & BIT(port)) 1098 return i; 1099 1100 return -1; 1101 } 1102 1103 /* We want FDB entries added statically through the bridge command to persist 1104 * across switch resets, which are a common thing during normal SJA1105 1105 * operation. So we have to back them up in the static configuration tables 1106 * and hence apply them on next static config upload... yay! 1107 */ 1108 static int 1109 sja1105_static_fdb_change(struct sja1105_private *priv, int port, 1110 const struct sja1105_l2_lookup_entry *requested, 1111 bool keep) 1112 { 1113 struct sja1105_l2_lookup_entry *l2_lookup; 1114 struct sja1105_table *table; 1115 int rc, match; 1116 1117 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; 1118 1119 match = sja1105_find_static_fdb_entry(priv, port, requested); 1120 if (match < 0) { 1121 /* Can't delete a missing entry. */ 1122 if (!keep) 1123 return 0; 1124 1125 /* No match => new entry */ 1126 rc = sja1105_table_resize(table, table->entry_count + 1); 1127 if (rc) 1128 return rc; 1129 1130 match = table->entry_count - 1; 1131 } 1132 1133 /* Assign pointer after the resize (it may be new memory) */ 1134 l2_lookup = table->entries; 1135 1136 /* We have a match. 1137 * If the job was to add this FDB entry, it's already done (mostly 1138 * anyway, since the port forwarding mask may have changed, case in 1139 * which we update it). 1140 * Otherwise we have to delete it. 1141 */ 1142 if (keep) { 1143 l2_lookup[match] = *requested; 1144 return 0; 1145 } 1146 1147 /* To remove, the strategy is to overwrite the element with 1148 * the last one, and then reduce the array size by 1 1149 */ 1150 l2_lookup[match] = l2_lookup[table->entry_count - 1]; 1151 return sja1105_table_resize(table, table->entry_count - 1); 1152 } 1153 1154 /* First-generation switches have a 4-way set associative TCAM that 1155 * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of 1156 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin). 1157 * For the placement of a newly learnt FDB entry, the switch selects the bin 1158 * based on a hash function, and the way within that bin incrementally. 1159 */ 1160 static int sja1105et_fdb_index(int bin, int way) 1161 { 1162 return bin * SJA1105ET_FDB_BIN_SIZE + way; 1163 } 1164 1165 static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin, 1166 const u8 *addr, u16 vid, 1167 struct sja1105_l2_lookup_entry *match, 1168 int *last_unused) 1169 { 1170 int way; 1171 1172 for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) { 1173 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1174 int index = sja1105et_fdb_index(bin, way); 1175 1176 /* Skip unused entries, optionally marking them 1177 * into the return value 1178 */ 1179 if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1180 index, &l2_lookup)) { 1181 if (last_unused) 1182 *last_unused = way; 1183 continue; 1184 } 1185 1186 if (l2_lookup.macaddr == ether_addr_to_u64(addr) && 1187 l2_lookup.vlanid == vid) { 1188 if (match) 1189 *match = l2_lookup; 1190 return way; 1191 } 1192 } 1193 /* Return an invalid entry index if not found */ 1194 return -1; 1195 } 1196 1197 int sja1105et_fdb_add(struct dsa_switch *ds, int port, 1198 const unsigned char *addr, u16 vid) 1199 { 1200 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1201 struct sja1105_private *priv = ds->priv; 1202 struct device *dev = ds->dev; 1203 int last_unused = -1; 1204 int bin, way, rc; 1205 1206 bin = sja1105et_fdb_hash(priv, addr, vid); 1207 1208 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid, 1209 &l2_lookup, &last_unused); 1210 if (way >= 0) { 1211 /* We have an FDB entry. Is our port in the destination 1212 * mask? If yes, we need to do nothing. If not, we need 1213 * to rewrite the entry by adding this port to it. 1214 */ 1215 if (l2_lookup.destports & BIT(port)) 1216 return 0; 1217 l2_lookup.destports |= BIT(port); 1218 } else { 1219 int index = sja1105et_fdb_index(bin, way); 1220 1221 /* We don't have an FDB entry. We construct a new one and 1222 * try to find a place for it within the FDB table. 1223 */ 1224 l2_lookup.macaddr = ether_addr_to_u64(addr); 1225 l2_lookup.destports = BIT(port); 1226 l2_lookup.vlanid = vid; 1227 1228 if (last_unused >= 0) { 1229 way = last_unused; 1230 } else { 1231 /* Bin is full, need to evict somebody. 1232 * Choose victim at random. If you get these messages 1233 * often, you may need to consider changing the 1234 * distribution function: 1235 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly 1236 */ 1237 get_random_bytes(&way, sizeof(u8)); 1238 way %= SJA1105ET_FDB_BIN_SIZE; 1239 dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n", 1240 bin, addr, way); 1241 /* Evict entry */ 1242 sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1243 index, NULL, false); 1244 } 1245 } 1246 l2_lookup.index = sja1105et_fdb_index(bin, way); 1247 1248 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1249 l2_lookup.index, &l2_lookup, 1250 true); 1251 if (rc < 0) 1252 return rc; 1253 1254 return sja1105_static_fdb_change(priv, port, &l2_lookup, true); 1255 } 1256 1257 int sja1105et_fdb_del(struct dsa_switch *ds, int port, 1258 const unsigned char *addr, u16 vid) 1259 { 1260 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1261 struct sja1105_private *priv = ds->priv; 1262 int index, bin, way, rc; 1263 bool keep; 1264 1265 bin = sja1105et_fdb_hash(priv, addr, vid); 1266 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid, 1267 &l2_lookup, NULL); 1268 if (way < 0) 1269 return 0; 1270 index = sja1105et_fdb_index(bin, way); 1271 1272 /* We have an FDB entry. Is our port in the destination mask? If yes, 1273 * we need to remove it. If the resulting port mask becomes empty, we 1274 * need to completely evict the FDB entry. 1275 * Otherwise we just write it back. 1276 */ 1277 l2_lookup.destports &= ~BIT(port); 1278 1279 if (l2_lookup.destports) 1280 keep = true; 1281 else 1282 keep = false; 1283 1284 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1285 index, &l2_lookup, keep); 1286 if (rc < 0) 1287 return rc; 1288 1289 return sja1105_static_fdb_change(priv, port, &l2_lookup, keep); 1290 } 1291 1292 int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port, 1293 const unsigned char *addr, u16 vid) 1294 { 1295 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1296 struct sja1105_private *priv = ds->priv; 1297 int rc, i; 1298 1299 /* Search for an existing entry in the FDB table */ 1300 l2_lookup.macaddr = ether_addr_to_u64(addr); 1301 l2_lookup.vlanid = vid; 1302 l2_lookup.iotag = SJA1105_S_TAG; 1303 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); 1304 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) { 1305 l2_lookup.mask_vlanid = VLAN_VID_MASK; 1306 l2_lookup.mask_iotag = BIT(0); 1307 } else { 1308 l2_lookup.mask_vlanid = 0; 1309 l2_lookup.mask_iotag = 0; 1310 } 1311 l2_lookup.destports = BIT(port); 1312 1313 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1314 SJA1105_SEARCH, &l2_lookup); 1315 if (rc == 0) { 1316 /* Found and this port is already in the entry's 1317 * port mask => job done 1318 */ 1319 if (l2_lookup.destports & BIT(port)) 1320 return 0; 1321 /* l2_lookup.index is populated by the switch in case it 1322 * found something. 1323 */ 1324 l2_lookup.destports |= BIT(port); 1325 goto skip_finding_an_index; 1326 } 1327 1328 /* Not found, so try to find an unused spot in the FDB. 1329 * This is slightly inefficient because the strategy is knock-knock at 1330 * every possible position from 0 to 1023. 1331 */ 1332 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { 1333 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1334 i, NULL); 1335 if (rc < 0) 1336 break; 1337 } 1338 if (i == SJA1105_MAX_L2_LOOKUP_COUNT) { 1339 dev_err(ds->dev, "FDB is full, cannot add entry.\n"); 1340 return -EINVAL; 1341 } 1342 l2_lookup.lockeds = true; 1343 l2_lookup.index = i; 1344 1345 skip_finding_an_index: 1346 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1347 l2_lookup.index, &l2_lookup, 1348 true); 1349 if (rc < 0) 1350 return rc; 1351 1352 return sja1105_static_fdb_change(priv, port, &l2_lookup, true); 1353 } 1354 1355 int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port, 1356 const unsigned char *addr, u16 vid) 1357 { 1358 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1359 struct sja1105_private *priv = ds->priv; 1360 bool keep; 1361 int rc; 1362 1363 l2_lookup.macaddr = ether_addr_to_u64(addr); 1364 l2_lookup.vlanid = vid; 1365 l2_lookup.iotag = SJA1105_S_TAG; 1366 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); 1367 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) { 1368 l2_lookup.mask_vlanid = VLAN_VID_MASK; 1369 l2_lookup.mask_iotag = BIT(0); 1370 } else { 1371 l2_lookup.mask_vlanid = 0; 1372 l2_lookup.mask_iotag = 0; 1373 } 1374 l2_lookup.destports = BIT(port); 1375 1376 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1377 SJA1105_SEARCH, &l2_lookup); 1378 if (rc < 0) 1379 return 0; 1380 1381 l2_lookup.destports &= ~BIT(port); 1382 1383 /* Decide whether we remove just this port from the FDB entry, 1384 * or if we remove it completely. 1385 */ 1386 if (l2_lookup.destports) 1387 keep = true; 1388 else 1389 keep = false; 1390 1391 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1392 l2_lookup.index, &l2_lookup, keep); 1393 if (rc < 0) 1394 return rc; 1395 1396 return sja1105_static_fdb_change(priv, port, &l2_lookup, keep); 1397 } 1398 1399 static int sja1105_fdb_add(struct dsa_switch *ds, int port, 1400 const unsigned char *addr, u16 vid) 1401 { 1402 struct sja1105_private *priv = ds->priv; 1403 1404 /* dsa_8021q is in effect when the bridge's vlan_filtering isn't, 1405 * so the switch still does some VLAN processing internally. 1406 * But Shared VLAN Learning (SVL) is also active, and it will take 1407 * care of autonomous forwarding between the unique pvid's of each 1408 * port. Here we just make sure that users can't add duplicate FDB 1409 * entries when in this mode - the actual VID doesn't matter except 1410 * for what gets printed in 'bridge fdb show'. In the case of zero, 1411 * no VID gets printed at all. 1412 */ 1413 if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) 1414 vid = 0; 1415 1416 return priv->info->fdb_add_cmd(ds, port, addr, vid); 1417 } 1418 1419 static int sja1105_fdb_del(struct dsa_switch *ds, int port, 1420 const unsigned char *addr, u16 vid) 1421 { 1422 struct sja1105_private *priv = ds->priv; 1423 1424 if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) 1425 vid = 0; 1426 1427 return priv->info->fdb_del_cmd(ds, port, addr, vid); 1428 } 1429 1430 static int sja1105_fdb_dump(struct dsa_switch *ds, int port, 1431 dsa_fdb_dump_cb_t *cb, void *data) 1432 { 1433 struct sja1105_private *priv = ds->priv; 1434 struct device *dev = ds->dev; 1435 int i; 1436 1437 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { 1438 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1439 u8 macaddr[ETH_ALEN]; 1440 int rc; 1441 1442 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1443 i, &l2_lookup); 1444 /* No fdb entry at i, not an issue */ 1445 if (rc == -ENOENT) 1446 continue; 1447 if (rc) { 1448 dev_err(dev, "Failed to dump FDB: %d\n", rc); 1449 return rc; 1450 } 1451 1452 /* FDB dump callback is per port. This means we have to 1453 * disregard a valid entry if it's not for this port, even if 1454 * only to revisit it later. This is inefficient because the 1455 * 1024-sized FDB table needs to be traversed 4 times through 1456 * SPI during a 'bridge fdb show' command. 1457 */ 1458 if (!(l2_lookup.destports & BIT(port))) 1459 continue; 1460 u64_to_ether_addr(l2_lookup.macaddr, macaddr); 1461 1462 /* We need to hide the dsa_8021q VLANs from the user. */ 1463 if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) 1464 l2_lookup.vlanid = 0; 1465 cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); 1466 } 1467 return 0; 1468 } 1469 1470 /* This callback needs to be present */ 1471 static int sja1105_mdb_prepare(struct dsa_switch *ds, int port, 1472 const struct switchdev_obj_port_mdb *mdb) 1473 { 1474 return 0; 1475 } 1476 1477 static void sja1105_mdb_add(struct dsa_switch *ds, int port, 1478 const struct switchdev_obj_port_mdb *mdb) 1479 { 1480 sja1105_fdb_add(ds, port, mdb->addr, mdb->vid); 1481 } 1482 1483 static int sja1105_mdb_del(struct dsa_switch *ds, int port, 1484 const struct switchdev_obj_port_mdb *mdb) 1485 { 1486 return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid); 1487 } 1488 1489 static int sja1105_bridge_member(struct dsa_switch *ds, int port, 1490 struct net_device *br, bool member) 1491 { 1492 struct sja1105_l2_forwarding_entry *l2_fwd; 1493 struct sja1105_private *priv = ds->priv; 1494 int i, rc; 1495 1496 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries; 1497 1498 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 1499 /* Add this port to the forwarding matrix of the 1500 * other ports in the same bridge, and viceversa. 1501 */ 1502 if (!dsa_is_user_port(ds, i)) 1503 continue; 1504 /* For the ports already under the bridge, only one thing needs 1505 * to be done, and that is to add this port to their 1506 * reachability domain. So we can perform the SPI write for 1507 * them immediately. However, for this port itself (the one 1508 * that is new to the bridge), we need to add all other ports 1509 * to its reachability domain. So we do that incrementally in 1510 * this loop, and perform the SPI write only at the end, once 1511 * the domain contains all other bridge ports. 1512 */ 1513 if (i == port) 1514 continue; 1515 if (dsa_to_port(ds, i)->bridge_dev != br) 1516 continue; 1517 sja1105_port_allow_traffic(l2_fwd, i, port, member); 1518 sja1105_port_allow_traffic(l2_fwd, port, i, member); 1519 1520 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING, 1521 i, &l2_fwd[i], true); 1522 if (rc < 0) 1523 return rc; 1524 } 1525 1526 return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING, 1527 port, &l2_fwd[port], true); 1528 } 1529 1530 static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, 1531 u8 state) 1532 { 1533 struct sja1105_private *priv = ds->priv; 1534 struct sja1105_mac_config_entry *mac; 1535 1536 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 1537 1538 switch (state) { 1539 case BR_STATE_DISABLED: 1540 case BR_STATE_BLOCKING: 1541 /* From UM10944 description of DRPDTAG (why put this there?): 1542 * "Management traffic flows to the port regardless of the state 1543 * of the INGRESS flag". So BPDUs are still be allowed to pass. 1544 * At the moment no difference between DISABLED and BLOCKING. 1545 */ 1546 mac[port].ingress = false; 1547 mac[port].egress = false; 1548 mac[port].dyn_learn = false; 1549 break; 1550 case BR_STATE_LISTENING: 1551 mac[port].ingress = true; 1552 mac[port].egress = false; 1553 mac[port].dyn_learn = false; 1554 break; 1555 case BR_STATE_LEARNING: 1556 mac[port].ingress = true; 1557 mac[port].egress = false; 1558 mac[port].dyn_learn = true; 1559 break; 1560 case BR_STATE_FORWARDING: 1561 mac[port].ingress = true; 1562 mac[port].egress = true; 1563 mac[port].dyn_learn = true; 1564 break; 1565 default: 1566 dev_err(ds->dev, "invalid STP state: %d\n", state); 1567 return; 1568 } 1569 1570 sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, 1571 &mac[port], true); 1572 } 1573 1574 static int sja1105_bridge_join(struct dsa_switch *ds, int port, 1575 struct net_device *br) 1576 { 1577 return sja1105_bridge_member(ds, port, br, true); 1578 } 1579 1580 static void sja1105_bridge_leave(struct dsa_switch *ds, int port, 1581 struct net_device *br) 1582 { 1583 sja1105_bridge_member(ds, port, br, false); 1584 } 1585 1586 static const char * const sja1105_reset_reasons[] = { 1587 [SJA1105_VLAN_FILTERING] = "VLAN filtering", 1588 [SJA1105_RX_HWTSTAMPING] = "RX timestamping", 1589 [SJA1105_AGEING_TIME] = "Ageing time", 1590 [SJA1105_SCHEDULING] = "Time-aware scheduling", 1591 [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing", 1592 }; 1593 1594 /* For situations where we need to change a setting at runtime that is only 1595 * available through the static configuration, resetting the switch in order 1596 * to upload the new static config is unavoidable. Back up the settings we 1597 * modify at runtime (currently only MAC) and restore them after uploading, 1598 * such that this operation is relatively seamless. 1599 */ 1600 int sja1105_static_config_reload(struct sja1105_private *priv, 1601 enum sja1105_reset_reason reason) 1602 { 1603 struct ptp_system_timestamp ptp_sts_before; 1604 struct ptp_system_timestamp ptp_sts_after; 1605 struct sja1105_mac_config_entry *mac; 1606 int speed_mbps[SJA1105_NUM_PORTS]; 1607 struct dsa_switch *ds = priv->ds; 1608 s64 t1, t2, t3, t4; 1609 s64 t12, t34; 1610 u16 bmcr = 0; 1611 int rc, i; 1612 s64 now; 1613 1614 mutex_lock(&priv->mgmt_lock); 1615 1616 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 1617 1618 /* Back up the dynamic link speed changed by sja1105_adjust_port_config 1619 * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the 1620 * switch wants to see in the static config in order to allow us to 1621 * change it through the dynamic interface later. 1622 */ 1623 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 1624 speed_mbps[i] = sja1105_speed[mac[i].speed]; 1625 mac[i].speed = SJA1105_SPEED_AUTO; 1626 } 1627 1628 if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) 1629 bmcr = sja1105_sgmii_read(priv, MII_BMCR); 1630 1631 /* No PTP operations can run right now */ 1632 mutex_lock(&priv->ptp_data.lock); 1633 1634 rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before); 1635 if (rc < 0) 1636 goto out_unlock_ptp; 1637 1638 /* Reset switch and send updated static configuration */ 1639 rc = sja1105_static_config_upload(priv); 1640 if (rc < 0) 1641 goto out_unlock_ptp; 1642 1643 rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after); 1644 if (rc < 0) 1645 goto out_unlock_ptp; 1646 1647 t1 = timespec64_to_ns(&ptp_sts_before.pre_ts); 1648 t2 = timespec64_to_ns(&ptp_sts_before.post_ts); 1649 t3 = timespec64_to_ns(&ptp_sts_after.pre_ts); 1650 t4 = timespec64_to_ns(&ptp_sts_after.post_ts); 1651 /* Mid point, corresponds to pre-reset PTPCLKVAL */ 1652 t12 = t1 + (t2 - t1) / 2; 1653 /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */ 1654 t34 = t3 + (t4 - t3) / 2; 1655 /* Advance PTPCLKVAL by the time it took since its readout */ 1656 now += (t34 - t12); 1657 1658 __sja1105_ptp_adjtime(ds, now); 1659 1660 out_unlock_ptp: 1661 mutex_unlock(&priv->ptp_data.lock); 1662 1663 dev_info(priv->ds->dev, 1664 "Reset switch and programmed static config. Reason: %s\n", 1665 sja1105_reset_reasons[reason]); 1666 1667 /* Configure the CGU (PLLs) for MII and RMII PHYs. 1668 * For these interfaces there is no dynamic configuration 1669 * needed, since PLLs have same settings at all speeds. 1670 */ 1671 rc = sja1105_clocking_setup(priv); 1672 if (rc < 0) 1673 goto out; 1674 1675 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 1676 rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]); 1677 if (rc < 0) 1678 goto out; 1679 } 1680 1681 if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) { 1682 bool an_enabled = !!(bmcr & BMCR_ANENABLE); 1683 1684 sja1105_sgmii_pcs_config(priv, an_enabled, false); 1685 1686 if (!an_enabled) { 1687 int speed = SPEED_UNKNOWN; 1688 1689 if (bmcr & BMCR_SPEED1000) 1690 speed = SPEED_1000; 1691 else if (bmcr & BMCR_SPEED100) 1692 speed = SPEED_100; 1693 else if (bmcr & BMCR_SPEED10) 1694 speed = SPEED_10; 1695 1696 sja1105_sgmii_pcs_force_speed(priv, speed); 1697 } 1698 } 1699 out: 1700 mutex_unlock(&priv->mgmt_lock); 1701 1702 return rc; 1703 } 1704 1705 static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid) 1706 { 1707 struct sja1105_mac_config_entry *mac; 1708 1709 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 1710 1711 mac[port].vlanid = pvid; 1712 1713 return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, 1714 &mac[port], true); 1715 } 1716 1717 static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid) 1718 { 1719 struct sja1105_vlan_lookup_entry *vlan; 1720 int count, i; 1721 1722 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; 1723 count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count; 1724 1725 for (i = 0; i < count; i++) 1726 if (vlan[i].vlanid == vid) 1727 return i; 1728 1729 /* Return an invalid entry index if not found */ 1730 return -1; 1731 } 1732 1733 static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid, 1734 bool enabled, bool untagged) 1735 { 1736 struct sja1105_vlan_lookup_entry *vlan; 1737 struct sja1105_table *table; 1738 bool keep = true; 1739 int match, rc; 1740 1741 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; 1742 1743 match = sja1105_is_vlan_configured(priv, vid); 1744 if (match < 0) { 1745 /* Can't delete a missing entry. */ 1746 if (!enabled) 1747 return 0; 1748 rc = sja1105_table_resize(table, table->entry_count + 1); 1749 if (rc) 1750 return rc; 1751 match = table->entry_count - 1; 1752 } 1753 /* Assign pointer after the resize (it's new memory) */ 1754 vlan = table->entries; 1755 vlan[match].vlanid = vid; 1756 if (enabled) { 1757 vlan[match].vlan_bc |= BIT(port); 1758 vlan[match].vmemb_port |= BIT(port); 1759 } else { 1760 vlan[match].vlan_bc &= ~BIT(port); 1761 vlan[match].vmemb_port &= ~BIT(port); 1762 } 1763 /* Also unset tag_port if removing this VLAN was requested, 1764 * just so we don't have a confusing bitmap (no practical purpose). 1765 */ 1766 if (untagged || !enabled) 1767 vlan[match].tag_port &= ~BIT(port); 1768 else 1769 vlan[match].tag_port |= BIT(port); 1770 /* If there's no port left as member of this VLAN, 1771 * it's time for it to go. 1772 */ 1773 if (!vlan[match].vmemb_port) 1774 keep = false; 1775 1776 dev_dbg(priv->ds->dev, 1777 "%s: port %d, vid %llu, broadcast domain 0x%llx, " 1778 "port members 0x%llx, tagged ports 0x%llx, keep %d\n", 1779 __func__, port, vlan[match].vlanid, vlan[match].vlan_bc, 1780 vlan[match].vmemb_port, vlan[match].tag_port, keep); 1781 1782 rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid, 1783 &vlan[match], keep); 1784 if (rc < 0) 1785 return rc; 1786 1787 if (!keep) 1788 return sja1105_table_delete_entry(table, match); 1789 1790 return 0; 1791 } 1792 1793 static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled) 1794 { 1795 int rc, i; 1796 1797 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 1798 rc = dsa_port_setup_8021q_tagging(ds, i, enabled); 1799 if (rc < 0) { 1800 dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n", 1801 i, rc); 1802 return rc; 1803 } 1804 } 1805 dev_info(ds->dev, "%s switch tagging\n", 1806 enabled ? "Enabled" : "Disabled"); 1807 return 0; 1808 } 1809 1810 static enum dsa_tag_protocol 1811 sja1105_get_tag_protocol(struct dsa_switch *ds, int port, 1812 enum dsa_tag_protocol mp) 1813 { 1814 return DSA_TAG_PROTO_SJA1105; 1815 } 1816 1817 /* This callback needs to be present */ 1818 static int sja1105_vlan_prepare(struct dsa_switch *ds, int port, 1819 const struct switchdev_obj_port_vlan *vlan) 1820 { 1821 return 0; 1822 } 1823 1824 /* The TPID setting belongs to the General Parameters table, 1825 * which can only be partially reconfigured at runtime (and not the TPID). 1826 * So a switch reset is required. 1827 */ 1828 static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled) 1829 { 1830 struct sja1105_l2_lookup_params_entry *l2_lookup_params; 1831 struct sja1105_general_params_entry *general_params; 1832 struct sja1105_private *priv = ds->priv; 1833 struct sja1105_table *table; 1834 u16 tpid, tpid2; 1835 int rc; 1836 1837 if (enabled) { 1838 /* Enable VLAN filtering. */ 1839 tpid = ETH_P_8021Q; 1840 tpid2 = ETH_P_8021AD; 1841 } else { 1842 /* Disable VLAN filtering. */ 1843 tpid = ETH_P_SJA1105; 1844 tpid2 = ETH_P_SJA1105; 1845 } 1846 1847 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; 1848 general_params = table->entries; 1849 /* EtherType used to identify inner tagged (C-tag) VLAN traffic */ 1850 general_params->tpid = tpid; 1851 /* EtherType used to identify outer tagged (S-tag) VLAN traffic */ 1852 general_params->tpid2 = tpid2; 1853 /* When VLAN filtering is on, we need to at least be able to 1854 * decode management traffic through the "backup plan". 1855 */ 1856 general_params->incl_srcpt1 = enabled; 1857 general_params->incl_srcpt0 = enabled; 1858 1859 /* VLAN filtering => independent VLAN learning. 1860 * No VLAN filtering => shared VLAN learning. 1861 * 1862 * In shared VLAN learning mode, untagged traffic still gets 1863 * pvid-tagged, and the FDB table gets populated with entries 1864 * containing the "real" (pvid or from VLAN tag) VLAN ID. 1865 * However the switch performs a masked L2 lookup in the FDB, 1866 * effectively only looking up a frame's DMAC (and not VID) for the 1867 * forwarding decision. 1868 * 1869 * This is extremely convenient for us, because in modes with 1870 * vlan_filtering=0, dsa_8021q actually installs unique pvid's into 1871 * each front panel port. This is good for identification but breaks 1872 * learning badly - the VID of the learnt FDB entry is unique, aka 1873 * no frames coming from any other port are going to have it. So 1874 * for forwarding purposes, this is as though learning was broken 1875 * (all frames get flooded). 1876 */ 1877 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; 1878 l2_lookup_params = table->entries; 1879 l2_lookup_params->shared_learn = !enabled; 1880 1881 rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING); 1882 if (rc) 1883 dev_err(ds->dev, "Failed to change VLAN Ethertype\n"); 1884 1885 /* Switch port identification based on 802.1Q is only passable 1886 * if we are not under a vlan_filtering bridge. So make sure 1887 * the two configurations are mutually exclusive. 1888 */ 1889 return sja1105_setup_8021q_tagging(ds, !enabled); 1890 } 1891 1892 static void sja1105_vlan_add(struct dsa_switch *ds, int port, 1893 const struct switchdev_obj_port_vlan *vlan) 1894 { 1895 struct sja1105_private *priv = ds->priv; 1896 u16 vid; 1897 int rc; 1898 1899 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { 1900 rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags & 1901 BRIDGE_VLAN_INFO_UNTAGGED); 1902 if (rc < 0) { 1903 dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n", 1904 vid, port, rc); 1905 return; 1906 } 1907 if (vlan->flags & BRIDGE_VLAN_INFO_PVID) { 1908 rc = sja1105_pvid_apply(ds->priv, port, vid); 1909 if (rc < 0) { 1910 dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n", 1911 vid, port, rc); 1912 return; 1913 } 1914 } 1915 } 1916 } 1917 1918 static int sja1105_vlan_del(struct dsa_switch *ds, int port, 1919 const struct switchdev_obj_port_vlan *vlan) 1920 { 1921 struct sja1105_private *priv = ds->priv; 1922 u16 vid; 1923 int rc; 1924 1925 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { 1926 rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags & 1927 BRIDGE_VLAN_INFO_UNTAGGED); 1928 if (rc < 0) { 1929 dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n", 1930 vid, port, rc); 1931 return rc; 1932 } 1933 } 1934 return 0; 1935 } 1936 1937 /* The programming model for the SJA1105 switch is "all-at-once" via static 1938 * configuration tables. Some of these can be dynamically modified at runtime, 1939 * but not the xMII mode parameters table. 1940 * Furthermode, some PHYs may not have crystals for generating their clocks 1941 * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's 1942 * ref_clk pin. So port clocking needs to be initialized early, before 1943 * connecting to PHYs is attempted, otherwise they won't respond through MDIO. 1944 * Setting correct PHY link speed does not matter now. 1945 * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY 1946 * bindings are not yet parsed by DSA core. We need to parse early so that we 1947 * can populate the xMII mode parameters table. 1948 */ 1949 static int sja1105_setup(struct dsa_switch *ds) 1950 { 1951 struct sja1105_dt_port ports[SJA1105_NUM_PORTS]; 1952 struct sja1105_private *priv = ds->priv; 1953 int rc; 1954 1955 rc = sja1105_parse_dt(priv, ports); 1956 if (rc < 0) { 1957 dev_err(ds->dev, "Failed to parse DT: %d\n", rc); 1958 return rc; 1959 } 1960 1961 /* Error out early if internal delays are required through DT 1962 * and we can't apply them. 1963 */ 1964 rc = sja1105_parse_rgmii_delays(priv, ports); 1965 if (rc < 0) { 1966 dev_err(ds->dev, "RGMII delay not supported\n"); 1967 return rc; 1968 } 1969 1970 rc = sja1105_ptp_clock_register(ds); 1971 if (rc < 0) { 1972 dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc); 1973 return rc; 1974 } 1975 /* Create and send configuration down to device */ 1976 rc = sja1105_static_config_load(priv, ports); 1977 if (rc < 0) { 1978 dev_err(ds->dev, "Failed to load static config: %d\n", rc); 1979 return rc; 1980 } 1981 /* Configure the CGU (PHY link modes and speeds) */ 1982 rc = sja1105_clocking_setup(priv); 1983 if (rc < 0) { 1984 dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc); 1985 return rc; 1986 } 1987 /* On SJA1105, VLAN filtering per se is always enabled in hardware. 1988 * The only thing we can do to disable it is lie about what the 802.1Q 1989 * EtherType is. 1990 * So it will still try to apply VLAN filtering, but all ingress 1991 * traffic (except frames received with EtherType of ETH_P_SJA1105) 1992 * will be internally tagged with a distorted VLAN header where the 1993 * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid. 1994 */ 1995 ds->vlan_filtering_is_global = true; 1996 1997 /* Advertise the 8 egress queues */ 1998 ds->num_tx_queues = SJA1105_NUM_TC; 1999 2000 ds->mtu_enforcement_ingress = true; 2001 2002 /* The DSA/switchdev model brings up switch ports in standalone mode by 2003 * default, and that means vlan_filtering is 0 since they're not under 2004 * a bridge, so it's safe to set up switch tagging at this time. 2005 */ 2006 return sja1105_setup_8021q_tagging(ds, true); 2007 } 2008 2009 static void sja1105_teardown(struct dsa_switch *ds) 2010 { 2011 struct sja1105_private *priv = ds->priv; 2012 int port; 2013 2014 for (port = 0; port < SJA1105_NUM_PORTS; port++) { 2015 struct sja1105_port *sp = &priv->ports[port]; 2016 2017 if (!dsa_is_user_port(ds, port)) 2018 continue; 2019 2020 if (sp->xmit_worker) 2021 kthread_destroy_worker(sp->xmit_worker); 2022 } 2023 2024 sja1105_flower_teardown(ds); 2025 sja1105_tas_teardown(ds); 2026 sja1105_ptp_clock_unregister(ds); 2027 sja1105_static_config_free(&priv->static_config); 2028 } 2029 2030 static int sja1105_port_enable(struct dsa_switch *ds, int port, 2031 struct phy_device *phy) 2032 { 2033 struct net_device *slave; 2034 2035 if (!dsa_is_user_port(ds, port)) 2036 return 0; 2037 2038 slave = dsa_to_port(ds, port)->slave; 2039 2040 slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 2041 2042 return 0; 2043 } 2044 2045 static void sja1105_port_disable(struct dsa_switch *ds, int port) 2046 { 2047 struct sja1105_private *priv = ds->priv; 2048 struct sja1105_port *sp = &priv->ports[port]; 2049 2050 if (!dsa_is_user_port(ds, port)) 2051 return; 2052 2053 kthread_cancel_work_sync(&sp->xmit_work); 2054 skb_queue_purge(&sp->xmit_queue); 2055 } 2056 2057 static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, 2058 struct sk_buff *skb, bool takets) 2059 { 2060 struct sja1105_mgmt_entry mgmt_route = {0}; 2061 struct sja1105_private *priv = ds->priv; 2062 struct ethhdr *hdr; 2063 int timeout = 10; 2064 int rc; 2065 2066 hdr = eth_hdr(skb); 2067 2068 mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest); 2069 mgmt_route.destports = BIT(port); 2070 mgmt_route.enfport = 1; 2071 mgmt_route.tsreg = 0; 2072 mgmt_route.takets = takets; 2073 2074 rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE, 2075 slot, &mgmt_route, true); 2076 if (rc < 0) { 2077 kfree_skb(skb); 2078 return rc; 2079 } 2080 2081 /* Transfer skb to the host port. */ 2082 dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave); 2083 2084 /* Wait until the switch has processed the frame */ 2085 do { 2086 rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE, 2087 slot, &mgmt_route); 2088 if (rc < 0) { 2089 dev_err_ratelimited(priv->ds->dev, 2090 "failed to poll for mgmt route\n"); 2091 continue; 2092 } 2093 2094 /* UM10944: The ENFPORT flag of the respective entry is 2095 * cleared when a match is found. The host can use this 2096 * flag as an acknowledgment. 2097 */ 2098 cpu_relax(); 2099 } while (mgmt_route.enfport && --timeout); 2100 2101 if (!timeout) { 2102 /* Clean up the management route so that a follow-up 2103 * frame may not match on it by mistake. 2104 * This is only hardware supported on P/Q/R/S - on E/T it is 2105 * a no-op and we are silently discarding the -EOPNOTSUPP. 2106 */ 2107 sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE, 2108 slot, &mgmt_route, false); 2109 dev_err_ratelimited(priv->ds->dev, "xmit timed out\n"); 2110 } 2111 2112 return NETDEV_TX_OK; 2113 } 2114 2115 #define work_to_port(work) \ 2116 container_of((work), struct sja1105_port, xmit_work) 2117 #define tagger_to_sja1105(t) \ 2118 container_of((t), struct sja1105_private, tagger_data) 2119 2120 /* Deferred work is unfortunately necessary because setting up the management 2121 * route cannot be done from atomit context (SPI transfer takes a sleepable 2122 * lock on the bus) 2123 */ 2124 static void sja1105_port_deferred_xmit(struct kthread_work *work) 2125 { 2126 struct sja1105_port *sp = work_to_port(work); 2127 struct sja1105_tagger_data *tagger_data = sp->data; 2128 struct sja1105_private *priv = tagger_to_sja1105(tagger_data); 2129 int port = sp - priv->ports; 2130 struct sk_buff *skb; 2131 2132 while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) { 2133 struct sk_buff *clone = DSA_SKB_CB(skb)->clone; 2134 2135 mutex_lock(&priv->mgmt_lock); 2136 2137 sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone); 2138 2139 /* The clone, if there, was made by dsa_skb_tx_timestamp */ 2140 if (clone) 2141 sja1105_ptp_txtstamp_skb(priv->ds, port, clone); 2142 2143 mutex_unlock(&priv->mgmt_lock); 2144 } 2145 } 2146 2147 /* The MAXAGE setting belongs to the L2 Forwarding Parameters table, 2148 * which cannot be reconfigured at runtime. So a switch reset is required. 2149 */ 2150 static int sja1105_set_ageing_time(struct dsa_switch *ds, 2151 unsigned int ageing_time) 2152 { 2153 struct sja1105_l2_lookup_params_entry *l2_lookup_params; 2154 struct sja1105_private *priv = ds->priv; 2155 struct sja1105_table *table; 2156 unsigned int maxage; 2157 2158 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; 2159 l2_lookup_params = table->entries; 2160 2161 maxage = SJA1105_AGEING_TIME_MS(ageing_time); 2162 2163 if (l2_lookup_params->maxage == maxage) 2164 return 0; 2165 2166 l2_lookup_params->maxage = maxage; 2167 2168 return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME); 2169 } 2170 2171 static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu) 2172 { 2173 struct sja1105_l2_policing_entry *policing; 2174 struct sja1105_private *priv = ds->priv; 2175 2176 new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN; 2177 2178 if (dsa_is_cpu_port(ds, port)) 2179 new_mtu += VLAN_HLEN; 2180 2181 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; 2182 2183 if (policing[port].maxlen == new_mtu) 2184 return 0; 2185 2186 policing[port].maxlen = new_mtu; 2187 2188 return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING); 2189 } 2190 2191 static int sja1105_get_max_mtu(struct dsa_switch *ds, int port) 2192 { 2193 return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN; 2194 } 2195 2196 static int sja1105_port_setup_tc(struct dsa_switch *ds, int port, 2197 enum tc_setup_type type, 2198 void *type_data) 2199 { 2200 switch (type) { 2201 case TC_SETUP_QDISC_TAPRIO: 2202 return sja1105_setup_tc_taprio(ds, port, type_data); 2203 default: 2204 return -EOPNOTSUPP; 2205 } 2206 } 2207 2208 /* We have a single mirror (@to) port, but can configure ingress and egress 2209 * mirroring on all other (@from) ports. 2210 * We need to allow mirroring rules only as long as the @to port is always the 2211 * same, and we need to unset the @to port from mirr_port only when there is no 2212 * mirroring rule that references it. 2213 */ 2214 static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to, 2215 bool ingress, bool enabled) 2216 { 2217 struct sja1105_general_params_entry *general_params; 2218 struct sja1105_mac_config_entry *mac; 2219 struct sja1105_table *table; 2220 bool already_enabled; 2221 u64 new_mirr_port; 2222 int rc; 2223 2224 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; 2225 general_params = table->entries; 2226 2227 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 2228 2229 already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS); 2230 if (already_enabled && enabled && general_params->mirr_port != to) { 2231 dev_err(priv->ds->dev, 2232 "Delete mirroring rules towards port %llu first\n", 2233 general_params->mirr_port); 2234 return -EBUSY; 2235 } 2236 2237 new_mirr_port = to; 2238 if (!enabled) { 2239 bool keep = false; 2240 int port; 2241 2242 /* Anybody still referencing mirr_port? */ 2243 for (port = 0; port < SJA1105_NUM_PORTS; port++) { 2244 if (mac[port].ing_mirr || mac[port].egr_mirr) { 2245 keep = true; 2246 break; 2247 } 2248 } 2249 /* Unset already_enabled for next time */ 2250 if (!keep) 2251 new_mirr_port = SJA1105_NUM_PORTS; 2252 } 2253 if (new_mirr_port != general_params->mirr_port) { 2254 general_params->mirr_port = new_mirr_port; 2255 2256 rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS, 2257 0, general_params, true); 2258 if (rc < 0) 2259 return rc; 2260 } 2261 2262 if (ingress) 2263 mac[from].ing_mirr = enabled; 2264 else 2265 mac[from].egr_mirr = enabled; 2266 2267 return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from, 2268 &mac[from], true); 2269 } 2270 2271 static int sja1105_mirror_add(struct dsa_switch *ds, int port, 2272 struct dsa_mall_mirror_tc_entry *mirror, 2273 bool ingress) 2274 { 2275 return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port, 2276 ingress, true); 2277 } 2278 2279 static void sja1105_mirror_del(struct dsa_switch *ds, int port, 2280 struct dsa_mall_mirror_tc_entry *mirror) 2281 { 2282 sja1105_mirror_apply(ds->priv, port, mirror->to_local_port, 2283 mirror->ingress, false); 2284 } 2285 2286 static int sja1105_port_policer_add(struct dsa_switch *ds, int port, 2287 struct dsa_mall_policer_tc_entry *policer) 2288 { 2289 struct sja1105_l2_policing_entry *policing; 2290 struct sja1105_private *priv = ds->priv; 2291 2292 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; 2293 2294 /* In hardware, every 8 microseconds the credit level is incremented by 2295 * the value of RATE bytes divided by 64, up to a maximum of SMAX 2296 * bytes. 2297 */ 2298 policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec, 2299 1000000); 2300 policing[port].smax = div_u64(policer->rate_bytes_per_sec * 2301 PSCHED_NS2TICKS(policer->burst), 2302 PSCHED_TICKS_PER_SEC); 2303 2304 return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING); 2305 } 2306 2307 static void sja1105_port_policer_del(struct dsa_switch *ds, int port) 2308 { 2309 struct sja1105_l2_policing_entry *policing; 2310 struct sja1105_private *priv = ds->priv; 2311 2312 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; 2313 2314 policing[port].rate = SJA1105_RATE_MBPS(1000); 2315 policing[port].smax = 65535; 2316 2317 sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING); 2318 } 2319 2320 static const struct dsa_switch_ops sja1105_switch_ops = { 2321 .get_tag_protocol = sja1105_get_tag_protocol, 2322 .setup = sja1105_setup, 2323 .teardown = sja1105_teardown, 2324 .set_ageing_time = sja1105_set_ageing_time, 2325 .port_change_mtu = sja1105_change_mtu, 2326 .port_max_mtu = sja1105_get_max_mtu, 2327 .phylink_validate = sja1105_phylink_validate, 2328 .phylink_mac_link_state = sja1105_mac_pcs_get_state, 2329 .phylink_mac_config = sja1105_mac_config, 2330 .phylink_mac_link_up = sja1105_mac_link_up, 2331 .phylink_mac_link_down = sja1105_mac_link_down, 2332 .get_strings = sja1105_get_strings, 2333 .get_ethtool_stats = sja1105_get_ethtool_stats, 2334 .get_sset_count = sja1105_get_sset_count, 2335 .get_ts_info = sja1105_get_ts_info, 2336 .port_enable = sja1105_port_enable, 2337 .port_disable = sja1105_port_disable, 2338 .port_fdb_dump = sja1105_fdb_dump, 2339 .port_fdb_add = sja1105_fdb_add, 2340 .port_fdb_del = sja1105_fdb_del, 2341 .port_bridge_join = sja1105_bridge_join, 2342 .port_bridge_leave = sja1105_bridge_leave, 2343 .port_stp_state_set = sja1105_bridge_stp_state_set, 2344 .port_vlan_prepare = sja1105_vlan_prepare, 2345 .port_vlan_filtering = sja1105_vlan_filtering, 2346 .port_vlan_add = sja1105_vlan_add, 2347 .port_vlan_del = sja1105_vlan_del, 2348 .port_mdb_prepare = sja1105_mdb_prepare, 2349 .port_mdb_add = sja1105_mdb_add, 2350 .port_mdb_del = sja1105_mdb_del, 2351 .port_hwtstamp_get = sja1105_hwtstamp_get, 2352 .port_hwtstamp_set = sja1105_hwtstamp_set, 2353 .port_rxtstamp = sja1105_port_rxtstamp, 2354 .port_txtstamp = sja1105_port_txtstamp, 2355 .port_setup_tc = sja1105_port_setup_tc, 2356 .port_mirror_add = sja1105_mirror_add, 2357 .port_mirror_del = sja1105_mirror_del, 2358 .port_policer_add = sja1105_port_policer_add, 2359 .port_policer_del = sja1105_port_policer_del, 2360 .cls_flower_add = sja1105_cls_flower_add, 2361 .cls_flower_del = sja1105_cls_flower_del, 2362 }; 2363 2364 static int sja1105_check_device_id(struct sja1105_private *priv) 2365 { 2366 const struct sja1105_regs *regs = priv->info->regs; 2367 u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0}; 2368 struct device *dev = &priv->spidev->dev; 2369 u32 device_id; 2370 u64 part_no; 2371 int rc; 2372 2373 rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id, 2374 NULL); 2375 if (rc < 0) 2376 return rc; 2377 2378 if (device_id != priv->info->device_id) { 2379 dev_err(dev, "Expected device ID 0x%llx but read 0x%x\n", 2380 priv->info->device_id, device_id); 2381 return -ENODEV; 2382 } 2383 2384 rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id, 2385 SJA1105_SIZE_DEVICE_ID); 2386 if (rc < 0) 2387 return rc; 2388 2389 sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID); 2390 2391 if (part_no != priv->info->part_no) { 2392 dev_err(dev, "Expected part number 0x%llx but read 0x%llx\n", 2393 priv->info->part_no, part_no); 2394 return -ENODEV; 2395 } 2396 2397 return 0; 2398 } 2399 2400 static int sja1105_probe(struct spi_device *spi) 2401 { 2402 struct sja1105_tagger_data *tagger_data; 2403 struct device *dev = &spi->dev; 2404 struct sja1105_private *priv; 2405 struct dsa_switch *ds; 2406 int rc, port; 2407 2408 if (!dev->of_node) { 2409 dev_err(dev, "No DTS bindings for SJA1105 driver\n"); 2410 return -EINVAL; 2411 } 2412 2413 priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL); 2414 if (!priv) 2415 return -ENOMEM; 2416 2417 /* Configure the optional reset pin and bring up switch */ 2418 priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); 2419 if (IS_ERR(priv->reset_gpio)) 2420 dev_dbg(dev, "reset-gpios not defined, ignoring\n"); 2421 else 2422 sja1105_hw_reset(priv->reset_gpio, 1, 1); 2423 2424 /* Populate our driver private structure (priv) based on 2425 * the device tree node that was probed (spi) 2426 */ 2427 priv->spidev = spi; 2428 spi_set_drvdata(spi, priv); 2429 2430 /* Configure the SPI bus */ 2431 spi->bits_per_word = 8; 2432 rc = spi_setup(spi); 2433 if (rc < 0) { 2434 dev_err(dev, "Could not init SPI\n"); 2435 return rc; 2436 } 2437 2438 priv->info = of_device_get_match_data(dev); 2439 2440 /* Detect hardware device */ 2441 rc = sja1105_check_device_id(priv); 2442 if (rc < 0) { 2443 dev_err(dev, "Device ID check failed: %d\n", rc); 2444 return rc; 2445 } 2446 2447 dev_info(dev, "Probed switch chip: %s\n", priv->info->name); 2448 2449 ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL); 2450 if (!ds) 2451 return -ENOMEM; 2452 2453 ds->dev = dev; 2454 ds->num_ports = SJA1105_NUM_PORTS; 2455 ds->ops = &sja1105_switch_ops; 2456 ds->priv = priv; 2457 priv->ds = ds; 2458 2459 tagger_data = &priv->tagger_data; 2460 2461 mutex_init(&priv->ptp_data.lock); 2462 mutex_init(&priv->mgmt_lock); 2463 2464 sja1105_tas_setup(ds); 2465 sja1105_flower_setup(ds); 2466 2467 rc = dsa_register_switch(priv->ds); 2468 if (rc) 2469 return rc; 2470 2471 /* Connections between dsa_port and sja1105_port */ 2472 for (port = 0; port < SJA1105_NUM_PORTS; port++) { 2473 struct sja1105_port *sp = &priv->ports[port]; 2474 struct dsa_port *dp = dsa_to_port(ds, port); 2475 struct net_device *slave; 2476 2477 if (!dsa_is_user_port(ds, port)) 2478 continue; 2479 2480 dp->priv = sp; 2481 sp->dp = dp; 2482 sp->data = tagger_data; 2483 slave = dp->slave; 2484 kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit); 2485 sp->xmit_worker = kthread_create_worker(0, "%s_xmit", 2486 slave->name); 2487 if (IS_ERR(sp->xmit_worker)) { 2488 rc = PTR_ERR(sp->xmit_worker); 2489 dev_err(ds->dev, 2490 "failed to create deferred xmit thread: %d\n", 2491 rc); 2492 goto out; 2493 } 2494 skb_queue_head_init(&sp->xmit_queue); 2495 } 2496 2497 return 0; 2498 out: 2499 while (port-- > 0) { 2500 struct sja1105_port *sp = &priv->ports[port]; 2501 2502 if (!dsa_is_user_port(ds, port)) 2503 continue; 2504 2505 kthread_destroy_worker(sp->xmit_worker); 2506 } 2507 return rc; 2508 } 2509 2510 static int sja1105_remove(struct spi_device *spi) 2511 { 2512 struct sja1105_private *priv = spi_get_drvdata(spi); 2513 2514 dsa_unregister_switch(priv->ds); 2515 return 0; 2516 } 2517 2518 static const struct of_device_id sja1105_dt_ids[] = { 2519 { .compatible = "nxp,sja1105e", .data = &sja1105e_info }, 2520 { .compatible = "nxp,sja1105t", .data = &sja1105t_info }, 2521 { .compatible = "nxp,sja1105p", .data = &sja1105p_info }, 2522 { .compatible = "nxp,sja1105q", .data = &sja1105q_info }, 2523 { .compatible = "nxp,sja1105r", .data = &sja1105r_info }, 2524 { .compatible = "nxp,sja1105s", .data = &sja1105s_info }, 2525 { /* sentinel */ }, 2526 }; 2527 MODULE_DEVICE_TABLE(of, sja1105_dt_ids); 2528 2529 static struct spi_driver sja1105_driver = { 2530 .driver = { 2531 .name = "sja1105", 2532 .owner = THIS_MODULE, 2533 .of_match_table = of_match_ptr(sja1105_dt_ids), 2534 }, 2535 .probe = sja1105_probe, 2536 .remove = sja1105_remove, 2537 }; 2538 2539 module_spi_driver(sja1105_driver); 2540 2541 MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>"); 2542 MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>"); 2543 MODULE_DESCRIPTION("SJA1105 Driver"); 2544 MODULE_LICENSE("GPL v2"); 2545