// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019-2021 NXP
 *
 * This is an umbrella module for all network switches that are
 * register-compatible with Ocelot and that perform I/O to their host CPU
 * through an NPI (Node Processor Interface) Ethernet port.
 */
#include <uapi/linux/if_bridge.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot.h>
#include <linux/dsa/8021q.h>
#include <linux/dsa/ocelot.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"

/* Translate the DSA database API into the ocelot switch library API,
 * which uses VID 0 for all ports that aren't part of a bridge,
 * and expects the bridge_dev to be NULL in that case.
 */
static struct net_device *felix_classify_db(struct dsa_db db)
{
	switch (db.type) {
	case DSA_DB_PORT:
	case DSA_DB_LAG:
		return NULL;
	case DSA_DB_BRIDGE:
		return db.bridge.dev;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}
}

static void felix_migrate_pgid_bit(struct dsa_switch *ds, int from, int to,
				   int pgid)
{
	struct ocelot *ocelot = ds->priv;
	bool on;
	u32 val;

	val = ocelot_read_rix(ocelot, ANA_PGID_PGID, pgid);
	on = !!(val & BIT(from));
	val &= ~BIT(from);
	if (on)
		val |= BIT(to);
	else
		val &= ~BIT(to);

	ocelot_write_rix(ocelot, val, ANA_PGID_PGID, pgid);
}

static void felix_migrate_flood_to_npi_port(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_UC);
	felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_MC);
	felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_BC);
}

static void
felix_migrate_flood_to_tag_8021q_port(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_UC);
	felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_MC);
	felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_BC);
}

/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
 * the tagger can perform RX source port identification.
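 * Each rule matches frames egressing towards the upstream (CPU-facing) port
 * and pushes an outer 802.1AD tag whose VID identifies the source user port.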
 */
static int felix_tag_8021q_vlan_add_rx(struct felix *felix, int port, u16 vid)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int key_length, upstream, err;

	key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
	upstream = dsa_upstream_port(ds, port);

	outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
				     GFP_KERNEL);
	if (!outer_tagging_rule)
		return -ENOMEM;

	outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	outer_tagging_rule->prio = 1;
	outer_tagging_rule->id.cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port);
	outer_tagging_rule->id.tc_offload = false;
	outer_tagging_rule->block_id = VCAP_ES0;
	outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	outer_tagging_rule->lookup = 0;
	outer_tagging_rule->ingress_port.value = port;
	outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->egress_port.value = upstream;
	outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG;
	outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
	outer_tagging_rule->action.tag_a_vid_sel = 1;
	outer_tagging_rule->action.vid_a_val = vid;

	err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
	if (err)
		kfree(outer_tagging_rule);

	return err;
}

static int felix_tag_8021q_vlan_del_rx(struct felix *felix, int port, u16 vid)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot_vcap_block *block_vcap_es0;
	struct ocelot *ocelot = &felix->ocelot;

	block_vcap_es0 = &ocelot->block[VCAP_ES0];

	outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
								 port, false);
	if (!outer_tagging_rule)
		return -ENOENT;

	return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
}

/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2
 * rules for steering those tagged packets towards the correct destination port
 */
static int felix_tag_8021q_vlan_add_tx(struct felix *felix, int port, u16 vid)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int upstream, err;

	untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!untagging_rule)
		return -ENOMEM;

	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!redirect_rule) {
		kfree(untagging_rule);
		return -ENOMEM;
	}

	upstream = dsa_upstream_port(ds, port);

	untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	untagging_rule->ingress_port_mask = BIT(upstream);
	untagging_rule->vlan.vid.value = vid;
	untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
	untagging_rule->prio = 1;
	untagging_rule->id.cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
	untagging_rule->id.tc_offload = false;
	untagging_rule->block_id = VCAP_IS1;
	untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	untagging_rule->lookup = 0;
	untagging_rule->action.vlan_pop_cnt_ena = true;
	untagging_rule->action.vlan_pop_cnt = 1;
	untagging_rule->action.pag_override_mask = 0xff;
	untagging_rule->action.pag_val = port;

	err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL);
	if (err) {
		kfree(untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
	redirect_rule->ingress_port_mask = BIT(upstream);
	redirect_rule->pag = port;
	redirect_rule->prio = 1;
	redirect_rule->id.cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
	redirect_rule->id.tc_offload = false;
	redirect_rule->block_id = VCAP_IS2;
	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	redirect_rule->lookup = 0;
	redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
	redirect_rule->action.port_mask = BIT(port);

	err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
	if (err) {
		ocelot_vcap_filter_del(ocelot, untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	return 0;
}

static int felix_tag_8021q_vlan_del_tx(struct felix *felix, int port, u16 vid)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	struct ocelot_vcap_block *block_vcap_is1;
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot *ocelot = &felix->ocelot;
	int err;

	block_vcap_is1 = &ocelot->block[VCAP_IS1];
	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
							     port, false);
	if (!untagging_rule)
		return -ENOENT;

	err = ocelot_vcap_filter_del(ocelot, untagging_rule);
	if (err)
		return err;

	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
							    port, false);
	if (!redirect_rule)
		return -ENOENT;

	return ocelot_vcap_filter_del(ocelot, redirect_rule);
}

static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
				    u16 flags)
{
	struct ocelot *ocelot = ds->priv;
	int err;

	/* tag_8021q.c assumes we are implementing this via port VLAN
	 * membership, which we aren't. So we don't need to add any VCAP filter
	 * for the CPU port.
	 */
	if (!dsa_is_user_port(ds, port))
		return 0;

	err = felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid);
	if (err)
		return err;

	err = felix_tag_8021q_vlan_add_tx(ocelot_to_felix(ocelot), port, vid);
	if (err) {
		felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid);
		return err;
	}

	return 0;
}

static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
	struct ocelot *ocelot = ds->priv;
	int err;

	if (!dsa_is_user_port(ds, port))
		return 0;

	err = felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid);
	if (err)
		return err;

	err = felix_tag_8021q_vlan_del_tx(ocelot_to_felix(ocelot), port, vid);
	if (err) {
		felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid);
		return err;
	}

	return 0;
}

/* As an alternative to using the NPI functionality, that same hardware MAC
 * connected internally to the enetc or fman DSA master can be configured to
 * use the software-defined tag_8021q frame format. As far as the hardware is
 * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
 * module are now disconnected from it, but can still be accessed through
 * register-based MMIO.
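 * felix_8021q_cpu_port_init() points PGID_CPU at the tag_8021q CPU port and
 * recomputes the bridge forwarding masks accordingly.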
 */
static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
{
	mutex_lock(&ocelot->fwd_domain_lock);

	ocelot_port_set_dsa_8021q_cpu(ocelot, port);

	/* Overwrite PGID_CPU with the non-tagging port */
	ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);

	ocelot_apply_bridge_fwd_mask(ocelot, true);

	mutex_unlock(&ocelot->fwd_domain_lock);
}

static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
{
	mutex_lock(&ocelot->fwd_domain_lock);

	ocelot_port_unset_dsa_8021q_cpu(ocelot, port);

	/* Restore PGID_CPU */
	ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
			 PGID_CPU);

	ocelot_apply_bridge_fwd_mask(ocelot, true);

	mutex_unlock(&ocelot->fwd_domain_lock);
}

/* On switches with no extraction IRQ wired, trapped packets need to be
 * replicated over Ethernet as well, otherwise we'd get no notification of
 * their arrival when using the ocelot-8021q tagging protocol.
 */
static int felix_update_trapping_destinations(struct dsa_switch *ds,
					      bool using_tag_8021q)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct ocelot_vcap_filter *trap;
	enum ocelot_mask_mode mask_mode;
	unsigned long port_mask;
	struct dsa_port *dp;
	bool cpu_copy_ena;
	int cpu = -1, err;

	if (!felix->info->quirk_no_xtr_irq)
		return 0;

	/* Figure out the current CPU port */
	dsa_switch_for_each_cpu_port(dp, ds) {
		cpu = dp->index;
		break;
	}

	/* We are sure that "cpu" was found, otherwise
	 * dsa_tree_setup_default_cpu() would have failed earlier.
	 */

	/* Make sure all traps are set up for that destination */
	list_for_each_entry(trap, &ocelot->traps, trap_list) {
		/* Figure out the current trapping destination */
		if (using_tag_8021q) {
			/* Redirect to the tag_8021q CPU port. If timestamps
			 * are necessary, also copy trapped packets to the CPU
			 * port module.
			 */
			mask_mode = OCELOT_MASK_MODE_REDIRECT;
			port_mask = BIT(cpu);
			cpu_copy_ena = !!trap->take_ts;
		} else {
			/* Trap packets only to the CPU port module, which is
			 * redirected to the NPI port (the DSA CPU port)
			 */
			mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
			port_mask = 0;
			cpu_copy_ena = true;
		}

		if (trap->action.mask_mode == mask_mode &&
		    trap->action.port_mask == port_mask &&
		    trap->action.cpu_copy_ena == cpu_copy_ena)
			continue;

		trap->action.mask_mode = mask_mode;
		trap->action.port_mask = port_mask;
		trap->action.cpu_copy_ena = cpu_copy_ena;

		err = ocelot_vcap_filter_replace(ocelot, trap);
		if (err)
			return err;
	}

	return 0;
}

static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	struct dsa_port *dp;
	int err;

	felix_8021q_cpu_port_init(ocelot, cpu);

	dsa_switch_for_each_available_port(dp, ds) {
		/* This overwrites ocelot_init():
		 * Do not forward BPDU frames to the CPU port module,
		 * for 2 reasons:
		 * - When these packets are injected from the tag_8021q
		 *   CPU port, we want them to go out, not loop back
		 *   into the system.
		 * - STP traffic ingressing on a user port should go to
		 *   the tag_8021q CPU port, not to the hardware CPU
		 *   port module.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
				 ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
	}

	err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
	if (err)
		return err;

	err = ocelot_migrate_mdbs(ocelot, BIT(ocelot->num_phys_ports),
				  BIT(cpu));
	if (err)
		goto out_tag_8021q_unregister;

	felix_migrate_flood_to_tag_8021q_port(ds, cpu);

	err = felix_update_trapping_destinations(ds, true);
	if (err)
		goto out_migrate_flood;

	/* The ownership of the CPU port module's queues might have just been
	 * transferred to the tag_8021q tagger from the NPI-based tagger.
	 * So there might still be all sorts of crap in the queues. On the
	 * other hand, the MMIO-based matching of PTP frames is very brittle,
	 * so we need to be careful that there are no extra frames to be
	 * dequeued over MMIO, since we would never know to discard them.
	 */
	ocelot_drain_cpu_queue(ocelot, 0);

	return 0;

out_migrate_flood:
	felix_migrate_flood_to_npi_port(ds, cpu);
	ocelot_migrate_mdbs(ocelot, BIT(cpu), BIT(ocelot->num_phys_ports));
out_tag_8021q_unregister:
	dsa_tag_8021q_unregister(ds);
	return err;
}

static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	struct dsa_port *dp;
	int err;

	err = felix_update_trapping_destinations(ds, false);
	if (err)
		dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
			err);

	dsa_tag_8021q_unregister(ds);

	dsa_switch_for_each_available_port(dp, ds) {
		/* Restore the logic from ocelot_init:
		 * do not forward BPDU frames to the front ports.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
				 ANA_PORT_CPU_FWD_BPDU_CFG,
				 dp->index);
	}

	felix_8021q_cpu_port_deinit(ocelot, cpu);
}

/* The CPU port module is connected to the Node Processor Interface (NPI). This
 * is the mode through which frames can be injected from and extracted to an
 * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
 * running Linux, and this forms a DSA setup together with the enetc or fman
 * DSA master.
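 * felix_npi_port_init() selects @port as the external CPU port in
 * QSYS_EXT_CPU_CFG and enables the injection/extraction frame prefixes on it.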
 */
static void felix_npi_port_init(struct ocelot *ocelot, int port)
{
	ocelot->npi = port;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
		     QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
		     QSYS_EXT_CPU_CFG);

	/* NPI port Injection/Extraction configuration */
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    ocelot->npi_xtr_prefix);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    ocelot->npi_inj_prefix);

	/* Disable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
}

static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
{
	/* Restore hardware defaults */
	int unused_port = ocelot->num_phys_ports + 2;

	ocelot->npi = -1;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
		     QSYS_EXT_CPU_CFG);

	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);

	/* Enable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
}

static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	int err;

	err = ocelot_migrate_mdbs(ocelot, BIT(cpu),
				  BIT(ocelot->num_phys_ports));
	if (err)
		return err;

	felix_migrate_flood_to_npi_port(ds, cpu);

	felix_npi_port_init(ocelot, cpu);

	return 0;
}

static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;

	felix_npi_port_deinit(ocelot, cpu);
}

static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu,
				  enum dsa_tag_protocol proto)
{
	int err;

	switch (proto) {
	case DSA_TAG_PROTO_SEVILLE:
	case DSA_TAG_PROTO_OCELOT:
		err = felix_setup_tag_npi(ds, cpu);
		break;
	case DSA_TAG_PROTO_OCELOT_8021Q:
		err = felix_setup_tag_8021q(ds, cpu);
		break;
	default:
		err = -EPROTONOSUPPORT;
	}

	return err;
}

static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu,
				   enum dsa_tag_protocol proto)
{
	switch (proto) {
	case DSA_TAG_PROTO_SEVILLE:
	case DSA_TAG_PROTO_OCELOT:
		felix_teardown_tag_npi(ds, cpu);
		break;
	case DSA_TAG_PROTO_OCELOT_8021Q:
		felix_teardown_tag_8021q(ds, cpu);
		break;
	default:
		break;
	}
}

/* This always leaves the switch in a consistent state, because although the
 * tag_8021q setup can fail, the NPI setup can't. So either the change is made,
 * or the restoration is guaranteed to work.
 */
static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
				     enum dsa_tag_protocol proto)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	enum dsa_tag_protocol old_proto = felix->tag_proto;
	bool cpu_port_active = false;
	struct dsa_port *dp;
	int err;

	if (proto != DSA_TAG_PROTO_SEVILLE &&
	    proto != DSA_TAG_PROTO_OCELOT &&
	    proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return -EPROTONOSUPPORT;

	/* We don't support multiple CPU ports, yet the DT blob may have
	 * multiple CPU ports defined. The first CPU port is the active one,
	 * the others are inactive. In this case, DSA will call
	 * ->change_tag_protocol() multiple times, once per CPU port.
	 * Since we implement the tagging protocol change towards "ocelot" or
	 * "seville" as effectively initializing the NPI port, what we are
	 * doing is effectively changing who the NPI port is to the last @cpu
	 * argument passed, which is an unused DSA CPU port and not the one
	 * that should actively pass traffic.
	 * Suppress DSA's calls on CPU ports that are inactive.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		if (dp->cpu_dp->index == cpu) {
			cpu_port_active = true;
			break;
		}
	}

	if (!cpu_port_active)
		return 0;

	felix_del_tag_protocol(ds, cpu, old_proto);

	err = felix_set_tag_protocol(ds, cpu, proto);
	if (err) {
		felix_set_tag_protocol(ds, cpu, old_proto);
		return err;
	}

	felix->tag_proto = proto;

	return 0;
}

static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
						    int port,
						    enum dsa_tag_protocol mp)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	return felix->tag_proto;
}

static int felix_set_ageing_time(struct dsa_switch *ds,
				 unsigned int ageing_time)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_set_ageing_time(ocelot, ageing_time);

	return 0;
}

static void felix_port_fast_age(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;
	int err;

	err = ocelot_mact_flush(ocelot, port);
	if (err)
		dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n",
			port, ERR_PTR(err));
}

static int felix_fdb_dump(struct dsa_switch *ds, int port,
			  dsa_fdb_dump_cb_t *cb, void *data)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_dump(ocelot, port, cb, data);
}

static int felix_fdb_add(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid,
			 struct dsa_db db)
{
	struct net_device *bridge_dev = felix_classify_db(db);
	struct ocelot *ocelot = ds->priv;

	if (IS_ERR(bridge_dev))
		return PTR_ERR(bridge_dev);

	if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
	    dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
		return 0;

	return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
}

static int felix_fdb_del(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid,
			 struct dsa_db db)
{
	struct net_device *bridge_dev = felix_classify_db(db);
	struct ocelot *ocelot = ds->priv;

	if (IS_ERR(bridge_dev))
		return PTR_ERR(bridge_dev);

	if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
	    dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
		return 0;

	return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
}

static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag,
			     const unsigned char *addr, u16 vid,
			     struct dsa_db db)
{
	struct net_device *bridge_dev = felix_classify_db(db);
	struct ocelot *ocelot = ds->priv;

	if (IS_ERR(bridge_dev))
		return PTR_ERR(bridge_dev);

	return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev);
}

static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag,
			     const unsigned char *addr, u16 vid,
			     struct dsa_db db)
{
	struct net_device *bridge_dev = felix_classify_db(db);
	struct ocelot *ocelot = ds->priv;

	if (IS_ERR(bridge_dev))
		return PTR_ERR(bridge_dev);

	return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid,
				  bridge_dev);
}

static int felix_mdb_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb,
			 struct dsa_db db)
{
	struct net_device *bridge_dev = felix_classify_db(db);
	struct ocelot *ocelot = ds->priv;

	if (IS_ERR(bridge_dev))
		return PTR_ERR(bridge_dev);

	if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
	    dsa_mdb_present_in_other_db(ds, port, mdb, db))
		return 0;

	return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev);
}

static int felix_mdb_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb,
			 struct dsa_db db)
{
	struct net_device *bridge_dev = felix_classify_db(db);
	struct ocelot *ocelot = ds->priv;

	if (IS_ERR(bridge_dev))
		return PTR_ERR(bridge_dev);

	if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
	    dsa_mdb_present_in_other_db(ds, port, mdb, db))
		return 0;

	return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev);
}

static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_bridge_stp_state_set(ocelot, port, state);
}

static int felix_pre_bridge_flags(struct dsa_switch *ds, int port,
				  struct switchdev_brport_flags val,
				  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_pre_bridge_flags(ocelot, port, val);
}

static int felix_bridge_flags(struct dsa_switch *ds, int port,
			      struct switchdev_brport_flags val,
			      struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_flags(ocelot, port, val);

	return 0;
}

static int felix_bridge_join(struct dsa_switch *ds, int port,
			     struct dsa_bridge bridge, bool *tx_fwd_offload,
			     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num,
				       extack);
}

static void felix_bridge_leave(struct dsa_switch *ds, int port,
			       struct dsa_bridge bridge)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_leave(ocelot, port, bridge.dev);
}

static int felix_lag_join(struct dsa_switch *ds, int port,
			  struct dsa_lag lag,
			  struct netdev_lag_upper_info *info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_lag_join(ocelot, port, lag.dev, info);
}

static int felix_lag_leave(struct dsa_switch *ds, int port,
			   struct dsa_lag lag)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_lag_leave(ocelot, port, lag.dev);

	return 0;
}

static int felix_lag_change(struct dsa_switch *ds, int port)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct ocelot *ocelot = ds->priv;

	ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled);

	return 0;
}

static int felix_vlan_prepare(struct dsa_switch *ds, int port,
			      const struct switchdev_obj_port_vlan *vlan,
			      struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	u16 flags = vlan->flags;

	/* Ocelot switches copy frames as-is to the CPU, so the flags:
	 * egress-untagged or not, pvid or not, make no difference. This
	 * behavior is already better than what DSA just tries to approximate
	 * when it installs the VLAN with the same flags on the CPU port.
	 * Just accept any configuration, and don't let ocelot deny installing
	 * multiple native VLANs on the NPI port, because the switch doesn't
	 * look at the port tag settings towards the NPI interface anyway.
	 */
	if (port == ocelot->npi)
		return 0;

	return ocelot_vlan_prepare(ocelot, port, vlan->vid,
				   flags & BRIDGE_VLAN_INFO_PVID,
				   flags & BRIDGE_VLAN_INFO_UNTAGGED,
				   extack);
}

static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
				struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
}

static int felix_vlan_add(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	u16 flags = vlan->flags;
	int err;

	err = felix_vlan_prepare(ds, port, vlan, extack);
	if (err)
		return err;

	return ocelot_vlan_add(ocelot, port, vlan->vid,
			       flags & BRIDGE_VLAN_INFO_PVID,
			       flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

static int felix_vlan_del(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_vlan_del(ocelot, port, vlan->vid);
}

static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
				   struct phylink_config *config)
{
	struct ocelot *ocelot = ds->priv;

	/* This driver does not make use of the speed, duplex, pause or the
	 * advertisement in its mac_config, so it is safe to mark this driver
	 * as non-legacy.
	 */
	config->legacy_pre_march2020 = false;

	__set_bit(ocelot->ports[port]->phy_mode,
		  config->supported_interfaces);
}

static void felix_phylink_validate(struct dsa_switch *ds, int port,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->phylink_validate)
		felix->info->phylink_validate(ocelot, port, supported, state);
}

static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds,
							 int port,
							 phy_interface_t iface)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct phylink_pcs *pcs = NULL;

	if (felix->pcs && felix->pcs[port])
		pcs = felix->pcs[port];

	return pcs;
}

static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
					unsigned int link_an_mode,
					phy_interface_t interface)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
				     FELIX_MAC_QUIRKS);
}

static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int link_an_mode,
				      phy_interface_t interface,
				      struct phy_device *phydev,
				      int speed, int duplex,
				      bool tx_pause, bool rx_pause)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
				   interface, speed, duplex, tx_pause, rx_pause,
				   FELIX_MAC_QUIRKS);

	if (felix->info->port_sched_speed_set)
		felix->info->port_sched_speed_set(ocelot, port, speed);
}

static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
	int i;

	ocelot_rmw_gix(ocelot,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG,
		       port);

	for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
		ocelot_rmw_ix(ocelot,
			      (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
			      ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M,
			      ANA_PORT_PCP_DEI_MAP,
			      port, i);
	}
}

static void felix_get_strings(struct dsa_switch *ds, int port,
			      u32 stringset, u8 *data)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_strings(ocelot, port, stringset, data);
}

static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_get_ethtool_stats(ocelot, port, data);
}

static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_sset_count(ocelot, port, sset);
}

static int felix_get_ts_info(struct dsa_switch *ds, int port,
			     struct ethtool_ts_info *info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_ts_info(ocelot, port, info);
}

static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = {
	[PHY_INTERFACE_MODE_INTERNAL] = OCELOT_PORT_MODE_INTERNAL,
	[PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII,
	[PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII,
	[PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII,
	[PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX,
};

static int felix_validate_phy_mode(struct felix *felix, int port,
				   phy_interface_t phy_mode)
{
	u32 modes = felix->info->port_modes[port];

	if (felix_phy_match_table[phy_mode] & modes)
		return 0;
	return -EOPNOTSUPP;
}

static int felix_parse_ports_node(struct felix *felix,
				  struct device_node *ports_node,
				  phy_interface_t *port_phy_modes)
{
	struct device *dev = felix->ocelot.dev;
	struct device_node *child;

	for_each_available_child_of_node(ports_node, child) {
		phy_interface_t phy_mode;
		u32 port;
		int err;

		/* Get switch port number from DT */
		if (of_property_read_u32(child, "reg", &port) < 0) {
			dev_err(dev, "Port number not defined in device tree "
				"(property \"reg\")\n");
			of_node_put(child);
			return -ENODEV;
		}

		/* Get PHY mode from DT */
		err = of_get_phy_mode(child, &phy_mode);
		if (err) {
			dev_err(dev, "Failed to read phy-mode or "
				"phy-interface-type property for port %d\n",
				port);
			of_node_put(child);
			return -ENODEV;
		}

		err = felix_validate_phy_mode(felix, port, phy_mode);
		if (err < 0) {
			dev_err(dev, "Unsupported PHY mode %s on port %d\n",
				phy_modes(phy_mode), port);
			of_node_put(child);
			return err;
		}

		port_phy_modes[port] = phy_mode;
	}

	return 0;
}

static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
{
	struct device *dev = felix->ocelot.dev;
	struct device_node *switch_node;
	struct device_node *ports_node;
	int err;

	switch_node = dev->of_node;

	ports_node = of_get_child_by_name(switch_node, "ports");
	if (!ports_node)
		ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
	if (!ports_node) {
		dev_err(dev, "Incorrect bindings: absent \"ports\" or "
			"\"ethernet-ports\" node\n");
		return -ENODEV;
	}

	err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
	of_node_put(ports_node);

	return err;
}

static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
	struct ocelot *ocelot = &felix->ocelot;
	phy_interface_t *port_phy_modes;
	struct resource res;
	int port, i, err;

	ocelot->num_phys_ports = num_phys_ports;
	ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
				     sizeof(struct ocelot_port *), GFP_KERNEL);
	if (!ocelot->ports)
		return -ENOMEM;

	ocelot->map = felix->info->map;
	ocelot->stats_layout = felix->info->stats_layout;
	ocelot->num_mact_rows = felix->info->num_mact_rows;
	ocelot->vcap = felix->info->vcap;
	ocelot->vcap_pol.base = felix->info->vcap_pol_base;
	ocelot->vcap_pol.max = felix->info->vcap_pol_max;
	ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2;
	ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2;
	ocelot->ops = felix->info->ops;
	ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->devlink = felix->ds->devlink;

	port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
				 GFP_KERNEL);
	if (!port_phy_modes)
		return -ENOMEM;

	err = felix_parse_dt(felix, port_phy_modes);
	if (err) {
		kfree(port_phy_modes);
		return err;
	}

	for (i = 0; i < TARGET_MAX; i++) {
		struct regmap *target;

		if (!felix->info->target_io_res[i].name)
			continue;

		memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
		res.flags = IORESOURCE_MEM;
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		target = felix->info->init_regmap(ocelot, &res);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map device memory space\n");
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot->targets[i] = target;
	}

	err = ocelot_regfields_init(ocelot, felix->info->regfields);
	if (err) {
		dev_err(ocelot->dev, "failed to init reg fields map\n");
		kfree(port_phy_modes);
		return err;
	}

	for (port = 0; port < num_phys_ports; port++) {
		struct ocelot_port *ocelot_port;
		struct regmap *target;

		ocelot_port = devm_kzalloc(ocelot->dev,
					   sizeof(struct ocelot_port),
					   GFP_KERNEL);
		if (!ocelot_port) {
			dev_err(ocelot->dev,
				"failed to allocate port memory\n");
			kfree(port_phy_modes);
			return -ENOMEM;
		}

		memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
		res.flags = IORESOURCE_MEM;
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		target = felix->info->init_regmap(ocelot, &res);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map memory space for port %d\n",
				port);
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot_port->phy_mode = port_phy_modes[port];
		ocelot_port->ocelot = ocelot;
		ocelot_port->target = target;
		ocelot->ports[port] = ocelot_port;
	}

	kfree(port_phy_modes);

	if (felix->info->mdio_bus_alloc) {
		err = felix->info->mdio_bus_alloc(ocelot);
		if (err < 0)
			return err;
	}

	return 0;
}

static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
					   struct sk_buff *skb)
{
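	/* If a TX timestamping clone was queued up for this skb, unlink it
	 * from the port's tx_skbs queue: the frame was never injected, so no
	 * timestamp will ever be delivered for it.
	 */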
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
	struct sk_buff *skb_match = NULL, *skb_tmp;
	unsigned long flags;

	if (!clone)
		return;

	spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);

	skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
		if (skb != clone)
			continue;
		__skb_unlink(skb, &ocelot_port->tx_skbs);
		skb_match = skb;
		break;
	}

	spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);

	WARN_ONCE(!skb_match,
		  "Could not find skb clone in TX timestamping list\n");
}

#define work_to_xmit_work(w) \
		container_of((w), struct felix_deferred_xmit_work, work)

static void felix_port_deferred_xmit(struct kthread_work *work)
{
	struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
	struct dsa_switch *ds = xmit_work->dp->ds;
	struct sk_buff *skb = xmit_work->skb;
	u32 rew_op = ocelot_ptp_rew_op(skb);
	struct ocelot *ocelot = ds->priv;
	int port = xmit_work->dp->index;
	int retries = 10;

	do {
		if (ocelot_can_inject(ocelot, 0))
			break;

		cpu_relax();
	} while (--retries);

	if (!retries) {
		dev_err(ocelot->dev, "port %d failed to inject skb\n",
			port);
		ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
		kfree_skb(skb);
		return;
	}

	ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);

	consume_skb(skb);
	kfree(xmit_work);
}

static int felix_connect_tag_protocol(struct dsa_switch *ds,
				      enum dsa_tag_protocol proto)
{
	struct ocelot_8021q_tagger_data *tagger_data;

	switch (proto) {
	case DSA_TAG_PROTO_OCELOT_8021Q:
		tagger_data = ocelot_8021q_tagger_data(ds);
		tagger_data->xmit_work_fn = felix_port_deferred_xmit;
		return 0;
	case DSA_TAG_PROTO_OCELOT:
	case DSA_TAG_PROTO_SEVILLE:
		return 0;
	default:
		return -EPROTONOSUPPORT;
	}
}

/* Hardware initialization done here so that we can allocate structures with
 * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
 * us to allocate structures twice (leak memory) and map PCI memory twice
 * (which will not work).
 */
static int felix_setup(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	unsigned long cpu_flood;
	struct dsa_port *dp;
	int err;

	err = felix_init_structs(felix, ds->num_ports);
	if (err)
		return err;

	err = ocelot_init(ocelot);
	if (err)
		goto out_mdiobus_free;

	if (ocelot->ptp) {
		err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
		if (err) {
			dev_err(ocelot->dev,
				"Timestamp initialization failed\n");
			ocelot->ptp = 0;
		}
	}

	dsa_switch_for_each_available_port(dp, ds) {
		ocelot_init_port(ocelot, dp->index);

		/* Set the default QoS Classification based on PCP and DEI
		 * bits of vlan tag.
		 */
		felix_port_qos_map_init(ocelot, dp->index);
	}

	err = ocelot_devlink_sb_register(ocelot);
	if (err)
		goto out_deinit_ports;

	dsa_switch_for_each_cpu_port(dp, ds) {
		/* The initial tag protocol is NPI which always returns 0, so
		 * there's no real point in checking for errors.
		 */
		felix_set_tag_protocol(ds, dp->index, felix->tag_proto);

		/* Start off with flooding disabled towards the NPI port
		 * (actually CPU port module).
		 */
		cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
		ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
		ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);

		break;
	}

	ds->mtu_enforcement_ingress = true;
	ds->assisted_learning_on_cpu_port = true;
	ds->fdb_isolation = true;
	ds->max_num_bridges = ds->num_ports;

	return 0;

out_deinit_ports:
	dsa_switch_for_each_available_port(dp, ds)
		ocelot_deinit_port(ocelot, dp->index);

	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

out_mdiobus_free:
	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);

	return err;
}

static void felix_teardown(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_port *dp;

	dsa_switch_for_each_cpu_port(dp, ds) {
		felix_del_tag_protocol(ds, dp->index, felix->tag_proto);
		break;
	}

	dsa_switch_for_each_available_port(dp, ds)
		ocelot_deinit_port(ocelot, dp->index);

	ocelot_devlink_sb_unregister(ocelot);
	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);
}

static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_hwstamp_get(ocelot, port, ifr);
}

static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	bool using_tag_8021q;
	int err;

	err = ocelot_hwstamp_set(ocelot, port, ifr);
	if (err)
		return err;

	using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;

	return felix_update_trapping_destinations(ds, using_tag_8021q);
}

static bool felix_check_xtr_pkt(struct ocelot *ocelot)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	int err = 0, grp = 0;

	if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return false;

	if (!felix->info->quirk_no_xtr_irq)
		return false;

	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
		struct sk_buff *skb;
		unsigned int type;

		err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
		if (err)
			goto out;

		/* We trap to the CPU port module all PTP frames, but
		 * felix_rxtstamp() only gets called for event frames.
		 * So we need to avoid sending duplicate general
		 * message frames by running a second BPF classifier
		 * here and dropping those.
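		 * ptp_classify_raw() expects the Ethernet header to still be
		 * in place, hence the __skb_push()/__skb_pull() around it.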
		 */
		__skb_push(skb, ETH_HLEN);

		type = ptp_classify_raw(skb);

		__skb_pull(skb, ETH_HLEN);

		if (type == PTP_CLASS_NONE) {
			kfree_skb(skb);
			continue;
		}

		netif_rx(skb);
	}

out:
	if (err < 0) {
		dev_err_ratelimited(ocelot->dev,
				    "Error during packet extraction: %pe\n",
				    ERR_PTR(err));
		ocelot_drain_cpu_queue(ocelot, 0);
	}

	return true;
}

static bool felix_rxtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb, unsigned int type)
{
	u32 tstamp_lo = OCELOT_SKB_CB(skb)->tstamp_lo;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ocelot *ocelot = ds->priv;
	struct timespec64 ts;
	u32 tstamp_hi;
	u64 tstamp;

	/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
	 * for RX timestamping. Then free it, and poll for its copy through
	 * MMIO in the CPU port module, and inject that into the stack from
	 * ocelot_xtr_poll().
	 */
	if (felix_check_xtr_pkt(ocelot)) {
		kfree_skb(skb);
		return true;
	}

	ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
	tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);

	tstamp_hi = tstamp >> 32;
	if ((tstamp & 0xffffffff) < tstamp_lo)
		tstamp_hi--;

	tstamp = ((u64)tstamp_hi << 32) | tstamp_lo;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = tstamp;
	return false;
}

static void felix_txtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb)
{
	struct ocelot *ocelot = ds->priv;
	struct sk_buff *clone = NULL;

	if (!ocelot->ptp)
		return;

	if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
		dev_err_ratelimited(ds->dev,
				    "port %d delivering skb without TX timestamp\n",
				    port);
		return;
	}

	if (clone)
		OCELOT_SKB_CB(skb)->clone = clone;
}

static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_set_maxlen(ocelot, port, new_mtu);

	return 0;
}

static int felix_get_max_mtu(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_max_mtu(ocelot, port);
}

static int felix_cls_flower_add(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	bool using_tag_8021q;
	int err;

	err = ocelot_cls_flower_replace(ocelot, port, cls, ingress);
	if (err)
		return err;

	using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;

	return felix_update_trapping_destinations(ds, using_tag_8021q);
}

static int felix_cls_flower_del(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_destroy(ocelot, port, cls, ingress);
}

static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
				  struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_stats(ocelot, port, cls, ingress);
}

static int felix_port_policer_add(struct dsa_switch *ds, int port,
				  struct dsa_mall_policer_tc_entry *policer)
{
	struct ocelot *ocelot = ds->priv;
	struct ocelot_policer pol = {
		.rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
		.burst = policer->burst,
	};

	return ocelot_port_policer_add(ocelot, port, &pol);
}

static void felix_port_policer_del(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_policer_del(ocelot, port);
}

static int felix_port_mirror_add(struct dsa_switch *ds, int port,
				 struct dsa_mall_mirror_tc_entry *mirror,
				 bool ingress, struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_mirror_add(ocelot, port, mirror->to_local_port,
				      ingress, extack);
}

static void felix_port_mirror_del(struct dsa_switch *ds, int port,
				  struct dsa_mall_mirror_tc_entry *mirror)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_mirror_del(ocelot, port, mirror->ingress);
}

static int felix_port_setup_tc(struct dsa_switch *ds, int port,
			       enum tc_setup_type type,
			       void *type_data)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->port_setup_tc)
		return felix->info->port_setup_tc(ds, port, type, type_data);
	else
		return -EOPNOTSUPP;
}

static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index,
			     struct devlink_sb_pool_info *pool_info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
}

static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index, u32 size,
			     enum devlink_sb_threshold_type threshold_type,
			     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
				  threshold_type, extack);
}

static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
				       p_threshold);
}

static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 threshold, struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
				       threshold, extack);
}

static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 *p_pool_index, u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
					  pool_type, p_pool_index,
					  p_threshold);
}

static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 pool_index, u32 threshold,
				     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
					  pool_type, pool_index, threshold,
					  extack);
}

static int felix_sb_occ_snapshot(struct dsa_switch *ds,
				 unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_snapshot(ocelot, sb_index);
}

static int felix_sb_occ_max_clear(struct dsa_switch *ds,
				  unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_max_clear(ocelot, sb_index);
}

static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
				      unsigned int sb_index, u16 pool_index,
				      u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
					   p_cur, p_max);
}

static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
					 unsigned int sb_index, u16 tc_index,
					 enum devlink_sb_pool_type pool_type,
					 u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index,
					      pool_type, p_cur, p_max);
}

static int felix_mrp_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_add(ocelot, port, mrp);
}

static int felix_mrp_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_del(ocelot, port, mrp);
}

static int
felix_mrp_add_ring_role(struct dsa_switch *ds, int port,
			const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_add_ring_role(ocelot, port, mrp);
}

static int
felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
			const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}

static int felix_port_get_default_prio(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_get_default_prio(ocelot, port);
}

static int felix_port_set_default_prio(struct dsa_switch *ds, int port,
				       u8 prio)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_set_default_prio(ocelot, port, prio);
}

static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_get_dscp_prio(ocelot, port, dscp);
}

static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
				    u8 prio)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio);
}

static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
				    u8 prio)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio);
}

const struct dsa_switch_ops felix_switch_ops = {
	.get_tag_protocol		= felix_get_tag_protocol,
	.change_tag_protocol		= felix_change_tag_protocol,
	.connect_tag_protocol		= felix_connect_tag_protocol,
	.setup				= felix_setup,
	.teardown			= felix_teardown,
	.set_ageing_time		= felix_set_ageing_time,
	.get_strings			= felix_get_strings,
	.get_ethtool_stats		= felix_get_ethtool_stats,
	.get_sset_count			= felix_get_sset_count,
	.get_ts_info			= felix_get_ts_info,
	.phylink_get_caps		= felix_phylink_get_caps,
	.phylink_validate		= felix_phylink_validate,
	.phylink_mac_select_pcs		= felix_phylink_mac_select_pcs,
	.phylink_mac_link_down		= felix_phylink_mac_link_down,
	.phylink_mac_link_up		= felix_phylink_mac_link_up,
	.port_fast_age			= felix_port_fast_age,
	.port_fdb_dump			= felix_fdb_dump,
	.port_fdb_add			= felix_fdb_add,
	.port_fdb_del			= felix_fdb_del,
	.lag_fdb_add			= felix_lag_fdb_add,
	.lag_fdb_del			= felix_lag_fdb_del,
	.port_mdb_add			= felix_mdb_add,
	.port_mdb_del			= felix_mdb_del,
	.port_pre_bridge_flags		= felix_pre_bridge_flags,
	.port_bridge_flags		= felix_bridge_flags,
	.port_bridge_join		= felix_bridge_join,
	.port_bridge_leave		= felix_bridge_leave,
	.port_lag_join			= felix_lag_join,
	.port_lag_leave			= felix_lag_leave,
	.port_lag_change		= felix_lag_change,
	.port_stp_state_set		= felix_bridge_stp_state_set,
	.port_vlan_filtering		= felix_vlan_filtering,
	.port_vlan_add			= felix_vlan_add,
	.port_vlan_del			= felix_vlan_del,
	.port_hwtstamp_get		= felix_hwtstamp_get,
	.port_hwtstamp_set		= felix_hwtstamp_set,
	.port_rxtstamp			= felix_rxtstamp,
	.port_txtstamp			= felix_txtstamp,
	.port_change_mtu		= felix_change_mtu,
	.port_max_mtu			= felix_get_max_mtu,
	.port_policer_add		= felix_port_policer_add,
	.port_policer_del		= felix_port_policer_del,
	.port_mirror_add		= felix_port_mirror_add,
	.port_mirror_del		= felix_port_mirror_del,
	.cls_flower_add			= felix_cls_flower_add,
	.cls_flower_del			= felix_cls_flower_del,
	.cls_flower_stats		= felix_cls_flower_stats,
	.port_setup_tc			= felix_port_setup_tc,
	.devlink_sb_pool_get		= felix_sb_pool_get,
	.devlink_sb_pool_set		= felix_sb_pool_set,
	.devlink_sb_port_pool_get	= felix_sb_port_pool_get,
	.devlink_sb_port_pool_set	= felix_sb_port_pool_set,
	.devlink_sb_tc_pool_bind_get	= felix_sb_tc_pool_bind_get,
	.devlink_sb_tc_pool_bind_set	= felix_sb_tc_pool_bind_set,
	.devlink_sb_occ_snapshot	= felix_sb_occ_snapshot,
	.devlink_sb_occ_max_clear	= felix_sb_occ_max_clear,
	.devlink_sb_occ_port_pool_get	= felix_sb_occ_port_pool_get,
	.devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get,
	.port_mrp_add			= felix_mrp_add,
	.port_mrp_del			= felix_mrp_del,
	.port_mrp_add_ring_role		= felix_mrp_add_ring_role,
	.port_mrp_del_ring_role		= felix_mrp_del_ring_role,
	.tag_8021q_vlan_add		= felix_tag_8021q_vlan_add,
	.tag_8021q_vlan_del		= felix_tag_8021q_vlan_del,
	.port_get_default_prio		= felix_port_get_default_prio,
	.port_set_default_prio		= felix_port_set_default_prio,
	.port_get_dscp_prio		= felix_port_get_dscp_prio,
	.port_add_dscp_prio		= felix_port_add_dscp_prio,
	.port_del_dscp_prio		= felix_port_del_dscp_prio,
};

struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_switch *ds = felix->ds;

	if (!dsa_is_user_port(ds, port))
		return NULL;

	return dsa_to_port(ds, port)->slave;
}

int felix_netdev_to_port(struct net_device *dev)
{
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(dev);
	if (IS_ERR(dp))
		return -EINVAL;

	return dp->index;
}