// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019-2021 NXP
 *
 * This is an umbrella module for all network switches that are
 * register-compatible with Ocelot and that perform I/O to their host CPU
 * through an NPI (Node Processor Interface) Ethernet port.
 */
#include <uapi/linux/if_bridge.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot.h>
#include <linux/dsa/8021q.h>
#include <linux/dsa/ocelot.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"

/* Translate the DSA database API into the ocelot switch library API,
 * which uses VID 0 for all ports that aren't part of a bridge,
 * and expects the bridge_dev to be NULL in that case.
 */
static struct net_device *felix_classify_db(struct dsa_db db)
{
	switch (db.type) {
	case DSA_DB_PORT:
	case DSA_DB_LAG:
		return NULL;
	case DSA_DB_BRIDGE:
		return db.bridge.dev;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}
}

static int felix_cpu_port_for_master(struct dsa_switch *ds,
				     struct net_device *master)
{
	struct ocelot *ocelot = ds->priv;
	struct dsa_port *cpu_dp;
	int lag;

	if (netif_is_lag_master(master)) {
		mutex_lock(&ocelot->fwd_domain_lock);
		lag = ocelot_bond_get_id(ocelot, master);
		mutex_unlock(&ocelot->fwd_domain_lock);

		return lag;
	}

	cpu_dp = master->dsa_ptr;
	return cpu_dp->index;
}

/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
 * the tagger can perform RX source port identification.
 */
static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port,
				       int upstream, u16 vid)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot *ocelot = ds->priv;
	unsigned long cookie;
	int key_length, err;

	key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;

	outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
				     GFP_KERNEL);
	if (!outer_tagging_rule)
		return -ENOMEM;

	cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream);

	outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	outer_tagging_rule->prio = 1;
	outer_tagging_rule->id.cookie = cookie;
	outer_tagging_rule->id.tc_offload = false;
	outer_tagging_rule->block_id = VCAP_ES0;
	outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	outer_tagging_rule->lookup = 0;
	outer_tagging_rule->ingress_port.value = port;
	outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->egress_port.value = upstream;
	outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG;
	outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
	outer_tagging_rule->action.tag_a_vid_sel = 1;
	outer_tagging_rule->action.vid_a_val = vid;

	err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
	if (err)
		kfree(outer_tagging_rule);

	return err;
}

static int felix_tag_8021q_vlan_del_rx(struct dsa_switch *ds, int port,
				       int upstream, u16 vid)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot_vcap_block *block_vcap_es0;
	struct ocelot *ocelot = ds->priv;
	unsigned long cookie;

	block_vcap_es0 = &ocelot->block[VCAP_ES0];
	cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream);

	outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
								 cookie, false);
	if (!outer_tagging_rule)
		return -ENOENT;

	return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
}

/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2
 * rules for steering those tagged packets towards the correct destination port
 */
static int felix_tag_8021q_vlan_add_tx(struct dsa_switch *ds, int port,
				       u16 vid)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	unsigned long cpu_ports = dsa_cpu_ports(ds);
	struct ocelot *ocelot = ds->priv;
	unsigned long cookie;
	int err;

	untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!untagging_rule)
		return -ENOMEM;

	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!redirect_rule) {
		kfree(untagging_rule);
		return -ENOMEM;
	}

	cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);

	untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	untagging_rule->ingress_port_mask = cpu_ports;
	untagging_rule->vlan.vid.value = vid;
	untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
	untagging_rule->prio = 1;
	untagging_rule->id.cookie = cookie;
	untagging_rule->id.tc_offload = false;
	untagging_rule->block_id = VCAP_IS1;
	untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	untagging_rule->lookup = 0;
	untagging_rule->action.vlan_pop_cnt_ena = true;
	untagging_rule->action.vlan_pop_cnt = 1;
	untagging_rule->action.pag_override_mask = 0xff;
	untagging_rule->action.pag_val = port;

	err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL);
	if (err) {
		kfree(untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);

	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
	redirect_rule->ingress_port_mask = cpu_ports;
	redirect_rule->pag = port;
	redirect_rule->prio = 1;
	redirect_rule->id.cookie = cookie;
	redirect_rule->id.tc_offload = false;
	redirect_rule->block_id = VCAP_IS2;
	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	redirect_rule->lookup = 0;
	redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
	redirect_rule->action.port_mask = BIT(port);

	err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
	if (err) {
		ocelot_vcap_filter_del(ocelot, untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	return 0;
}

static int felix_tag_8021q_vlan_del_tx(struct dsa_switch *ds, int port, u16 vid)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	struct ocelot_vcap_block *block_vcap_is1;
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot *ocelot = ds->priv;
	unsigned long cookie;
	int err;

	block_vcap_is1 = &ocelot->block[VCAP_IS1];
	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
	untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
							     cookie, false);
	if (!untagging_rule)
		return -ENOENT;

	err = ocelot_vcap_filter_del(ocelot, untagging_rule);
	if (err)
		return err;

	cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
							    cookie, false);
	if (!redirect_rule)
		return -ENOENT;

	return ocelot_vcap_filter_del(ocelot, redirect_rule);
}

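/* tag_8021q installs one VLAN per user port. On RX, an ES0 rule per
 * {user port, CPU port} pair pushes that VLAN towards the CPU so the tagger
 * can identify the source port; on TX, an IS1 + IS2 rule pair installed on
 * the CPU ports strips the VLAN and redirects the frame to the intended
 * user port.
 */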
static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
				    u16 flags)
{
	struct dsa_port *cpu_dp;
	int err;

	/* tag_8021q.c assumes we are implementing this via port VLAN
	 * membership, which we aren't. So we don't need to add any VCAP filter
	 * for the CPU port.
	 */
	if (!dsa_is_user_port(ds, port))
		return 0;

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
		if (err)
			return err;
	}

	err = felix_tag_8021q_vlan_add_tx(ds, port, vid);
	if (err)
		goto add_tx_failed;

	return 0;

add_tx_failed:
	dsa_switch_for_each_cpu_port(cpu_dp, ds)
		felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid);

	return err;
}

static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
	struct dsa_port *cpu_dp;
	int err;

	if (!dsa_is_user_port(ds, port))
		return 0;

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		err = felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid);
		if (err)
			return err;
	}

	err = felix_tag_8021q_vlan_del_tx(ds, port, vid);
	if (err)
		goto del_tx_failed;

	return 0;

del_tx_failed:
	dsa_switch_for_each_cpu_port(cpu_dp, ds)
		felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);

	return err;
}

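/* Determine the DSA CPU port serving a given trap, based on the first user
 * port found in the trap's ingress port mask.
 */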
static int felix_trap_get_cpu_port(struct dsa_switch *ds,
				   const struct ocelot_vcap_filter *trap)
{
	struct dsa_port *dp;
	int first_port;

	if (WARN_ON(!trap->ingress_port_mask))
		return -1;

	first_port = __ffs(trap->ingress_port_mask);
	dp = dsa_to_port(ds, first_port);

	return dp->cpu_dp->index;
}

/* On switches with no extraction IRQ wired, trapped packets need to be
 * replicated over Ethernet as well, otherwise we'd get no notification of
 * their arrival when using the ocelot-8021q tagging protocol.
 */
static int felix_update_trapping_destinations(struct dsa_switch *ds,
					      bool using_tag_8021q)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot_vcap_filter *trap;
	enum ocelot_mask_mode mask_mode;
	unsigned long port_mask;
	bool cpu_copy_ena;
	int err;

	if (!felix->info->quirk_no_xtr_irq)
		return 0;

	/* We are sure that "cpu" was found, otherwise
	 * dsa_tree_setup_default_cpu() would have failed earlier.
	 */
	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	/* Make sure all traps are set up for that destination */
	list_for_each_entry(trap, &block_vcap_is2->rules, list) {
		if (!trap->is_trap)
			continue;

		/* Figure out the current trapping destination */
		if (using_tag_8021q) {
			/* Redirect to the tag_8021q CPU port. If timestamps
			 * are necessary, also copy trapped packets to the CPU
			 * port module.
			 */
			mask_mode = OCELOT_MASK_MODE_REDIRECT;
			port_mask = BIT(felix_trap_get_cpu_port(ds, trap));
			cpu_copy_ena = !!trap->take_ts;
		} else {
			/* Trap packets only to the CPU port module, which is
			 * redirected to the NPI port (the DSA CPU port)
			 */
			mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
			port_mask = 0;
			cpu_copy_ena = true;
		}

		if (trap->action.mask_mode == mask_mode &&
		    trap->action.port_mask == port_mask &&
		    trap->action.cpu_copy_ena == cpu_copy_ena)
			continue;

		trap->action.mask_mode = mask_mode;
		trap->action.port_mask = port_mask;
		trap->action.cpu_copy_ena = cpu_copy_ena;

		err = ocelot_vcap_filter_replace(ocelot, trap);
		if (err)
			return err;
	}

	return 0;
}

/* The CPU port module is connected to the Node Processor Interface (NPI). This
 * is the mode through which frames can be injected from and extracted to an
 * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
 * running Linux, and this forms a DSA setup together with the enetc or fman
 * DSA master.
 */
static void felix_npi_port_init(struct ocelot *ocelot, int port)
{
	ocelot->npi = port;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
		     QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
		     QSYS_EXT_CPU_CFG);

	/* NPI port Injection/Extraction configuration */
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    ocelot->npi_xtr_prefix);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    ocelot->npi_inj_prefix);

	/* Disable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
}

static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
{
	/* Restore hardware defaults */
	int unused_port = ocelot->num_phys_ports + 2;

	ocelot->npi = -1;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
		     QSYS_EXT_CPU_CFG);

	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);

	/* Enable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
}

static int felix_tag_npi_setup(struct dsa_switch *ds)
{
	struct dsa_port *dp, *first_cpu_dp = NULL;
	struct ocelot *ocelot = ds->priv;

	dsa_switch_for_each_user_port(dp, ds) {
		if (first_cpu_dp && dp->cpu_dp != first_cpu_dp) {
			dev_err(ds->dev, "Multiple NPI ports not supported\n");
			return -EINVAL;
		}

		first_cpu_dp = dp->cpu_dp;
	}

	if (!first_cpu_dp)
		return -EINVAL;

	felix_npi_port_init(ocelot, first_cpu_dp->index);

	return 0;
}

static void felix_tag_npi_teardown(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;

	felix_npi_port_deinit(ocelot, ocelot->npi);
}

static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;

	return BIT(ocelot->num_phys_ports);
}

static int felix_tag_npi_change_master(struct dsa_switch *ds, int port,
				       struct net_device *master,
				       struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
	struct ocelot *ocelot = ds->priv;

	if (netif_is_lag_master(master)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "LAG DSA master only supported using ocelot-8021q");
		return -EOPNOTSUPP;
	}

	/* Changing the NPI port breaks user ports still assigned to the old
	 * one, so only allow it while they're down, and don't allow them to
	 * come back up until they're all changed to the new one.
	 */
	dsa_switch_for_each_user_port(other_dp, ds) {
		struct net_device *slave = other_dp->slave;

		if (other_dp != dp && (slave->flags & IFF_UP) &&
		    dsa_port_to_master(other_dp) != master) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Cannot change while old master still has users");
			return -EOPNOTSUPP;
		}
	}

	felix_npi_port_deinit(ocelot, ocelot->npi);
	felix_npi_port_init(ocelot, felix_cpu_port_for_master(ds, master));

	return 0;
}

/* As an alternative to using the NPI functionality, that same hardware MAC
 * connected internally to the enetc or fman DSA master can be configured to
 * use the software-defined tag_8021q frame format. As far as the hardware is
 * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
 * module are now disconnected from it, but can still be accessed through
 * register-based MMIO.
 */
static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = {
	.setup			= felix_tag_npi_setup,
	.teardown		= felix_tag_npi_teardown,
	.get_host_fwd_mask	= felix_tag_npi_get_host_fwd_mask,
	.change_master		= felix_tag_npi_change_master,
};

static int felix_tag_8021q_setup(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct dsa_port *dp;
	int err;

	err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
	if (err)
		return err;

	dsa_switch_for_each_cpu_port(dp, ds)
		ocelot_port_setup_dsa_8021q_cpu(ocelot, dp->index);

	dsa_switch_for_each_user_port(dp, ds)
		ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index,
						 dp->cpu_dp->index);

	dsa_switch_for_each_available_port(dp, ds)
		/* This overwrites ocelot_init():
		 * Do not forward BPDU frames to the CPU port module,
		 * for 2 reasons:
		 * - When these packets are injected from the tag_8021q
		 *   CPU port, we want them to go out, not loop back
		 *   into the system.
		 * - STP traffic ingressing on a user port should go to
		 *   the tag_8021q CPU port, not to the hardware CPU
		 *   port module.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
				 ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);

	/* The ownership of the CPU port module's queues might have just been
	 * transferred to the tag_8021q tagger from the NPI-based tagger.
	 * So there might still be all sorts of crap in the queues. On the
	 * other hand, the MMIO-based matching of PTP frames is very brittle,
	 * so we need to be careful that there are no extra frames to be
	 * dequeued over MMIO, since we would never know to discard them.
	 */
	ocelot_drain_cpu_queue(ocelot, 0);

	return 0;
}

static void felix_tag_8021q_teardown(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct dsa_port *dp;

	dsa_switch_for_each_available_port(dp, ds)
		/* Restore the logic from ocelot_init:
		 * do not forward BPDU frames to the front ports.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
				 ANA_PORT_CPU_FWD_BPDU_CFG,
				 dp->index);

	dsa_switch_for_each_user_port(dp, ds)
		ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index);

	dsa_switch_for_each_cpu_port(dp, ds)
		ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index);

	dsa_tag_8021q_unregister(ds);
}

static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds)
{
	return dsa_cpu_ports(ds);
}

static int felix_tag_8021q_change_master(struct dsa_switch *ds, int port,
					 struct net_device *master,
					 struct netlink_ext_ack *extack)
{
	int cpu = felix_cpu_port_for_master(ds, master);
	struct ocelot *ocelot = ds->priv;

	ocelot_port_unassign_dsa_8021q_cpu(ocelot, port);
	ocelot_port_assign_dsa_8021q_cpu(ocelot, port, cpu);

	return felix_update_trapping_destinations(ds, true);
}

static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = {
	.setup			= felix_tag_8021q_setup,
	.teardown		= felix_tag_8021q_teardown,
	.get_host_fwd_mask	= felix_tag_8021q_get_host_fwd_mask,
	.change_master		= felix_tag_8021q_change_master,
};

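/* Open or close the flooding of unknown unicast, multicast and broadcast
 * traffic towards the host ports in @mask, by touching only those bits of
 * the PGID_UC, PGID_MC, PGID_MCIPV4, PGID_MCIPV6 and PGID_BC port masks.
 */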
static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask,
				 bool uc, bool mc, bool bc)
{
	struct ocelot *ocelot = ds->priv;
	unsigned long val;

	val = uc ? mask : 0;
	ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC);

	val = mc ? mask : 0;
	ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4);
	ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6);

	val = bc ? mask : 0;
	ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_BC);
}

static void
felix_migrate_host_flood(struct dsa_switch *ds,
			 const struct felix_tag_proto_ops *proto_ops,
			 const struct felix_tag_proto_ops *old_proto_ops)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	unsigned long mask;

	if (old_proto_ops) {
		mask = old_proto_ops->get_host_fwd_mask(ds);
		felix_set_host_flood(ds, mask, false, false, false);
	}

	mask = proto_ops->get_host_fwd_mask(ds);
	felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask,
			     !!felix->host_flood_mc_mask, true);
}

static int felix_migrate_mdbs(struct dsa_switch *ds,
			      const struct felix_tag_proto_ops *proto_ops,
			      const struct felix_tag_proto_ops *old_proto_ops)
{
	struct ocelot *ocelot = ds->priv;
	unsigned long from, to;

	if (!old_proto_ops)
		return 0;

	from = old_proto_ops->get_host_fwd_mask(ds);
	to = proto_ops->get_host_fwd_mask(ds);

	return ocelot_migrate_mdbs(ocelot, from, to);
}

/* Configure the shared hardware resources for a transition between
 * @old_proto_ops and @proto_ops.
 * Manual migration is needed because as far as DSA is concerned, no change of
 * the CPU port is taking place here, just of the tagging protocol.
 */
static int
felix_tag_proto_setup_shared(struct dsa_switch *ds,
			     const struct felix_tag_proto_ops *proto_ops,
			     const struct felix_tag_proto_ops *old_proto_ops)
{
	bool using_tag_8021q = (proto_ops == &felix_tag_8021q_proto_ops);
	int err;

	err = felix_migrate_mdbs(ds, proto_ops, old_proto_ops);
	if (err)
		return err;

	felix_update_trapping_destinations(ds, using_tag_8021q);

	felix_migrate_host_flood(ds, proto_ops, old_proto_ops);

	return 0;
}

/* This always leaves the switch in a consistent state, because although the
 * tag_8021q setup can fail, the NPI setup can't. So either the change is made,
 * or the restoration is guaranteed to work.
 */
static int felix_change_tag_protocol(struct dsa_switch *ds,
				     enum dsa_tag_protocol proto)
{
	const struct felix_tag_proto_ops *old_proto_ops, *proto_ops;
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int err;

	switch (proto) {
	case DSA_TAG_PROTO_SEVILLE:
	case DSA_TAG_PROTO_OCELOT:
		proto_ops = &felix_tag_npi_proto_ops;
		break;
	case DSA_TAG_PROTO_OCELOT_8021Q:
		proto_ops = &felix_tag_8021q_proto_ops;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	old_proto_ops = felix->tag_proto_ops;

	if (proto_ops == old_proto_ops)
		return 0;

	err = proto_ops->setup(ds);
	if (err)
		goto setup_failed;

	err = felix_tag_proto_setup_shared(ds, proto_ops, old_proto_ops);
	if (err)
		goto setup_shared_failed;

	if (old_proto_ops)
		old_proto_ops->teardown(ds);

	felix->tag_proto_ops = proto_ops;
	felix->tag_proto = proto;

	return 0;

setup_shared_failed:
	proto_ops->teardown(ds);
setup_failed:
	return err;
}

static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
						    int port,
						    enum dsa_tag_protocol mp)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	return felix->tag_proto;
}

static void felix_port_set_host_flood(struct dsa_switch *ds, int port,
				      bool uc, bool mc)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	unsigned long mask;

	if (uc)
		felix->host_flood_uc_mask |= BIT(port);
	else
		felix->host_flood_uc_mask &= ~BIT(port);

	if (mc)
		felix->host_flood_mc_mask |= BIT(port);
	else
		felix->host_flood_mc_mask &= ~BIT(port);

	mask = felix->tag_proto_ops->get_host_fwd_mask(ds);
	felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask,
			     !!felix->host_flood_mc_mask, true);
}

static int felix_port_change_master(struct dsa_switch *ds, int port,
				    struct net_device *master,
				    struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	return felix->tag_proto_ops->change_master(ds, port, master, extack);
}

static int felix_set_ageing_time(struct dsa_switch *ds,
				 unsigned int ageing_time)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_set_ageing_time(ocelot, ageing_time);

	return 0;
}

static void felix_port_fast_age(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;
	int err;

	err = ocelot_mact_flush(ocelot, port);
	if (err)
		dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n",
			port, ERR_PTR(err));
}

static int felix_fdb_dump(struct dsa_switch *ds, int port,
			  dsa_fdb_dump_cb_t *cb, void *data)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_dump(ocelot, port, cb, data);
}

static int felix_fdb_add(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid,
			 struct dsa_db db)
{
	struct net_device *bridge_dev = felix_classify_db(db);
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct ocelot *ocelot = ds->priv;

	if (IS_ERR(bridge_dev))
		return PTR_ERR(bridge_dev);

	if (dsa_port_is_cpu(dp) && !bridge_dev &&
	    dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
		return 0;

	if (dsa_port_is_cpu(dp))
		port = PGID_CPU;

	return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
}

static int felix_fdb_del(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid,
			 struct dsa_db db)
{
	struct net_device *bridge_dev = felix_classify_db(db);
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct ocelot *ocelot = ds->priv;

	if (IS_ERR(bridge_dev))
		return PTR_ERR(bridge_dev);

	if (dsa_port_is_cpu(dp) && !bridge_dev &&
	    dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
		return 0;

	if (dsa_port_is_cpu(dp))
		port = PGID_CPU;

	return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
}

static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag,
			     const unsigned char *addr, u16 vid,
			     struct dsa_db db)
{
	struct net_device *bridge_dev = felix_classify_db(db);
	struct ocelot *ocelot = ds->priv;

	if (IS_ERR(bridge_dev))
		return PTR_ERR(bridge_dev);

	return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev);
}

static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag,
			     const unsigned char *addr, u16 vid,
			     struct dsa_db db)
{
	struct net_device *bridge_dev = felix_classify_db(db);
	struct ocelot *ocelot = ds->priv;

	if (IS_ERR(bridge_dev))
		return PTR_ERR(bridge_dev);

	return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid, bridge_dev);
}

static int felix_mdb_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb,
			 struct dsa_db db)
{
	struct net_device *bridge_dev = felix_classify_db(db);
	struct ocelot *ocelot = ds->priv;

	if (IS_ERR(bridge_dev))
		return PTR_ERR(bridge_dev);

	if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
	    dsa_mdb_present_in_other_db(ds, port, mdb, db))
		return 0;

	if (port == ocelot->npi)
		port = ocelot->num_phys_ports;

	return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev);
}

static int felix_mdb_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb,
			 struct dsa_db db)
{
	struct net_device *bridge_dev = felix_classify_db(db);
	struct ocelot *ocelot = ds->priv;

	if (IS_ERR(bridge_dev))
		return PTR_ERR(bridge_dev);

	if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
	    dsa_mdb_present_in_other_db(ds, port, mdb, db))
		return 0;

	if (port == ocelot->npi)
		port = ocelot->num_phys_ports;

	return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev);
}

static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_bridge_stp_state_set(ocelot, port, state);
}

static int felix_pre_bridge_flags(struct dsa_switch *ds, int port,
				  struct switchdev_brport_flags val,
				  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_pre_bridge_flags(ocelot, port, val);
}

static int felix_bridge_flags(struct dsa_switch *ds, int port,
			      struct switchdev_brport_flags val,
			      struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	if (port == ocelot->npi)
		port = ocelot->num_phys_ports;

	ocelot_port_bridge_flags(ocelot, port, val);

	return 0;
}

static int felix_bridge_join(struct dsa_switch *ds, int port,
			     struct dsa_bridge bridge, bool *tx_fwd_offload,
			     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num,
				       extack);
}

static void felix_bridge_leave(struct dsa_switch *ds, int port,
			       struct dsa_bridge bridge)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_leave(ocelot, port, bridge.dev);
}

static int felix_lag_join(struct dsa_switch *ds, int port,
			  struct dsa_lag lag,
			  struct netdev_lag_upper_info *info,
			  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	int err;

	err = ocelot_port_lag_join(ocelot, port, lag.dev, info, extack);
	if (err)
		return err;

	/* Update the logical LAG port that serves as tag_8021q CPU port */
	if (!dsa_is_cpu_port(ds, port))
		return 0;

	return felix_port_change_master(ds, port, lag.dev, extack);
}

static int felix_lag_leave(struct dsa_switch *ds, int port,
			   struct dsa_lag lag)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_lag_leave(ocelot, port, lag.dev);

	/* Update the logical LAG port that serves as tag_8021q CPU port */
	if (!dsa_is_cpu_port(ds, port))
		return 0;

	return felix_port_change_master(ds, port, lag.dev, NULL);
}

static int felix_lag_change(struct dsa_switch *ds, int port)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct ocelot *ocelot = ds->priv;

	ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled);

	return 0;
}

static int felix_vlan_prepare(struct dsa_switch *ds, int port,
			      const struct switchdev_obj_port_vlan *vlan,
			      struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	u16 flags = vlan->flags;

	/* Ocelot switches copy frames as-is to the CPU, so the flags:
	 * egress-untagged or not, pvid or not, make no difference. This
	 * behavior is already better than what DSA just tries to approximate
	 * when it installs the VLAN with the same flags on the CPU port.
	 * Just accept any configuration, and don't let ocelot deny installing
	 * multiple native VLANs on the NPI port, because the switch doesn't
	 * look at the port tag settings towards the NPI interface anyway.
	 */
	if (port == ocelot->npi)
		return 0;

	return ocelot_vlan_prepare(ocelot, port, vlan->vid,
				   flags & BRIDGE_VLAN_INFO_PVID,
				   flags & BRIDGE_VLAN_INFO_UNTAGGED,
				   extack);
}

static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
				struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
}

static int felix_vlan_add(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	u16 flags = vlan->flags;
	int err;

	err = felix_vlan_prepare(ds, port, vlan, extack);
	if (err)
		return err;

	return ocelot_vlan_add(ocelot, port, vlan->vid,
			       flags & BRIDGE_VLAN_INFO_PVID,
			       flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

static int felix_vlan_del(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_vlan_del(ocelot, port, vlan->vid);
}

static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
				   struct phylink_config *config)
{
	struct ocelot *ocelot = ds->priv;

	/* This driver does not make use of the speed, duplex, pause or the
	 * advertisement in its mac_config, so it is safe to mark this driver
	 * as non-legacy.
	 */
	config->legacy_pre_march2020 = false;

	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000FD |
				   MAC_2500FD;

	__set_bit(ocelot->ports[port]->phy_mode,
		  config->supported_interfaces);
}

static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
				     unsigned int mode,
				     const struct phylink_link_state *state)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->phylink_mac_config)
		felix->info->phylink_mac_config(ocelot, port, mode, state);
}

static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds,
							int port,
							phy_interface_t iface)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct phylink_pcs *pcs = NULL;

	if (felix->pcs && felix->pcs[port])
		pcs = felix->pcs[port];

	return pcs;
}

static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
					unsigned int link_an_mode,
					phy_interface_t interface)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix;

	felix = ocelot_to_felix(ocelot);

	ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
				     felix->info->quirks);
}

static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int link_an_mode,
				      phy_interface_t interface,
				      struct phy_device *phydev,
				      int speed, int duplex,
				      bool tx_pause, bool rx_pause)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
				   interface, speed, duplex, tx_pause, rx_pause,
				   felix->info->quirks);

	if (felix->info->port_sched_speed_set)
		felix->info->port_sched_speed_set(ocelot, port, speed);
}

static int felix_port_enable(struct dsa_switch *ds, int port,
			     struct phy_device *phydev)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct ocelot *ocelot = ds->priv;

	if (!dsa_port_is_user(dp))
		return 0;

	if (ocelot->npi >= 0) {
		struct net_device *master = dsa_port_to_master(dp);

		if (felix_cpu_port_for_master(ds, master) != ocelot->npi) {
			dev_err(ds->dev, "Multiple masters are not allowed\n");
			return -EINVAL;
		}
	}

	return 0;
}

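/* Enable QoS classification based on the PCP and DEI bits of the VLAN tag,
 * and program one PCP/DEI map entry for each of the OCELOT_NUM_TC * 2
 * {DEI, PCP} combinations.
 */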
static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
	int i;

	ocelot_rmw_gix(ocelot,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG,
		       port);

	for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
		ocelot_rmw_ix(ocelot,
			      (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
			      ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M,
			      ANA_PORT_PCP_DEI_MAP,
			      port, i);
	}
}

static void felix_get_stats64(struct dsa_switch *ds, int port,
			      struct rtnl_link_stats64 *stats)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_get_stats64(ocelot, port, stats);
}

static void felix_get_pause_stats(struct dsa_switch *ds, int port,
				  struct ethtool_pause_stats *pause_stats)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_get_pause_stats(ocelot, port, pause_stats);
}

static void felix_get_rmon_stats(struct dsa_switch *ds, int port,
				 struct ethtool_rmon_stats *rmon_stats,
				 const struct ethtool_rmon_hist_range **ranges)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_get_rmon_stats(ocelot, port, rmon_stats, ranges);
}

static void felix_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
				     struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_get_eth_ctrl_stats(ocelot, port, ctrl_stats);
}

static void felix_get_eth_mac_stats(struct dsa_switch *ds, int port,
				    struct ethtool_eth_mac_stats *mac_stats)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_get_eth_mac_stats(ocelot, port, mac_stats);
}

static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port,
				    struct ethtool_eth_phy_stats *phy_stats)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats);
}

static void felix_get_strings(struct dsa_switch *ds, int port,
			      u32 stringset, u8 *data)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_strings(ocelot, port, stringset, data);
}

static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_get_ethtool_stats(ocelot, port, data);
}

static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_sset_count(ocelot, port, sset);
}

static int felix_get_ts_info(struct dsa_switch *ds, int port,
			     struct ethtool_ts_info *info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_ts_info(ocelot, port, info);
}

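/* Translate a phy_interface_t into the OCELOT_PORT_MODE_* bitmask that is
 * checked against the per-port capabilities in felix->info->port_modes[].
 */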
static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = {
	[PHY_INTERFACE_MODE_INTERNAL] = OCELOT_PORT_MODE_INTERNAL,
	[PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII,
	[PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII,
	[PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII,
	[PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX,
	[PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX,
};

static int felix_validate_phy_mode(struct felix *felix, int port,
				   phy_interface_t phy_mode)
{
	u32 modes = felix->info->port_modes[port];

	if (felix_phy_match_table[phy_mode] & modes)
		return 0;
	return -EOPNOTSUPP;
}

static int felix_parse_ports_node(struct felix *felix,
				  struct device_node *ports_node,
				  phy_interface_t *port_phy_modes)
{
	struct device *dev = felix->ocelot.dev;
	struct device_node *child;

	for_each_available_child_of_node(ports_node, child) {
		phy_interface_t phy_mode;
		u32 port;
		int err;

		/* Get switch port number from DT */
		if (of_property_read_u32(child, "reg", &port) < 0) {
			dev_err(dev, "Port number not defined in device tree "
				"(property \"reg\")\n");
			of_node_put(child);
			return -ENODEV;
		}

		/* Get PHY mode from DT */
		err = of_get_phy_mode(child, &phy_mode);
		if (err) {
			dev_err(dev, "Failed to read phy-mode or "
				"phy-interface-type property for port %d\n",
				port);
			of_node_put(child);
			return -ENODEV;
		}

		err = felix_validate_phy_mode(felix, port, phy_mode);
		if (err < 0) {
			dev_info(dev, "Unsupported PHY mode %s on port %d\n",
				 phy_modes(phy_mode), port);
			of_node_put(child);

			/* Leave port_phy_modes[port] = 0, which is also
			 * PHY_INTERFACE_MODE_NA. This will perform a
			 * best-effort to bring up as many ports as possible.
			 */
			continue;
		}

		port_phy_modes[port] = phy_mode;
	}

	return 0;
}

static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
{
	struct device *dev = felix->ocelot.dev;
	struct device_node *switch_node;
	struct device_node *ports_node;
	int err;

	switch_node = dev->of_node;

	ports_node = of_get_child_by_name(switch_node, "ports");
	if (!ports_node)
		ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
	if (!ports_node) {
		dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n");
		return -ENODEV;
	}

	err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
	of_node_put(ports_node);

	return err;
}

static struct regmap *felix_request_regmap_by_name(struct felix *felix,
						   const char *resource_name)
{
	struct ocelot *ocelot = &felix->ocelot;
	struct resource res;
	int i;

	/* In an MFD configuration, regmaps are registered directly to the
	 * parent device before the child devices are probed, so there is no
	 * need to initialize a new one.
	 */
	if (!felix->info->resources)
		return dev_get_regmap(ocelot->dev->parent, resource_name);

	for (i = 0; i < felix->info->num_resources; i++) {
		if (strcmp(resource_name, felix->info->resources[i].name))
			continue;

		memcpy(&res, &felix->info->resources[i], sizeof(res));
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		return ocelot_regmap_init(ocelot, &res);
	}

	return ERR_PTR(-ENOENT);
}

static struct regmap *felix_request_regmap(struct felix *felix,
					   enum ocelot_target target)
{
	const char *resource_name = felix->info->resource_names[target];

	/* If the driver didn't provide a resource name for the target,
	 * the resource is optional.
	 */
	if (!resource_name)
		return NULL;

	return felix_request_regmap_by_name(felix, resource_name);
}

static struct regmap *felix_request_port_regmap(struct felix *felix, int port)
{
	char resource_name[32];

	sprintf(resource_name, "port%d", port);

	return felix_request_regmap_by_name(felix, resource_name);
}

static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
	struct ocelot *ocelot = &felix->ocelot;
	phy_interface_t *port_phy_modes;
	struct regmap *target;
	int port, i, err;

	ocelot->num_phys_ports = num_phys_ports;
	ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
				     sizeof(struct ocelot_port *), GFP_KERNEL);
	if (!ocelot->ports)
		return -ENOMEM;

	ocelot->map = felix->info->map;
	ocelot->num_mact_rows = felix->info->num_mact_rows;
	ocelot->vcap = felix->info->vcap;
	ocelot->vcap_pol.base = felix->info->vcap_pol_base;
	ocelot->vcap_pol.max = felix->info->vcap_pol_max;
	ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2;
	ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2;
	ocelot->ops = felix->info->ops;
	ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->devlink = felix->ds->devlink;

	port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
				 GFP_KERNEL);
	if (!port_phy_modes)
		return -ENOMEM;

	err = felix_parse_dt(felix, port_phy_modes);
	if (err) {
		kfree(port_phy_modes);
		return err;
	}

	for (i = 0; i < TARGET_MAX; i++) {
		target = felix_request_regmap(felix, i);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map device memory space: %pe\n",
				target);
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot->targets[i] = target;
	}

	err = ocelot_regfields_init(ocelot, felix->info->regfields);
	if (err) {
		dev_err(ocelot->dev, "failed to init reg fields map\n");
		kfree(port_phy_modes);
		return err;
	}

	for (port = 0; port < num_phys_ports; port++) {
		struct ocelot_port *ocelot_port;

		ocelot_port = devm_kzalloc(ocelot->dev,
					   sizeof(struct ocelot_port),
					   GFP_KERNEL);
		if (!ocelot_port) {
			dev_err(ocelot->dev,
				"failed to allocate port memory\n");
			kfree(port_phy_modes);
			return -ENOMEM;
		}

		target = felix_request_port_regmap(felix, port);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map memory space for port %d: %pe\n",
				port, target);
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot_port->phy_mode = port_phy_modes[port];
		ocelot_port->ocelot = ocelot;
		ocelot_port->target = target;
		ocelot_port->index = port;
		ocelot->ports[port] = ocelot_port;
	}

	kfree(port_phy_modes);

	if (felix->info->mdio_bus_alloc) {
		err = felix->info->mdio_bus_alloc(ocelot);
		if (err < 0)
			return err;
	}

	return 0;
}

static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
					   struct sk_buff *skb)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
	struct sk_buff *skb_match = NULL, *skb_tmp;
	unsigned long flags;

	if (!clone)
		return;

	spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);

	skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
		if (skb != clone)
			continue;
		__skb_unlink(skb, &ocelot_port->tx_skbs);
		skb_match = skb;
		break;
	}

	spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);

	WARN_ONCE(!skb_match,
		  "Could not find skb clone in TX timestamping list\n");
}

#define work_to_xmit_work(w) \
		container_of((w), struct felix_deferred_xmit_work, work)

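/* Deferred work set up by the ocelot-8021q tagger: inject the skb through the
 * CPU port module over register-based MMIO, busy-waiting for room in
 * injection group 0. If injection never becomes possible, drop the frame and
 * purge its TX timestamp clone.
 */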
static void felix_port_deferred_xmit(struct kthread_work *work)
{
	struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
	struct dsa_switch *ds = xmit_work->dp->ds;
	struct sk_buff *skb = xmit_work->skb;
	u32 rew_op = ocelot_ptp_rew_op(skb);
	struct ocelot *ocelot = ds->priv;
	int port = xmit_work->dp->index;
	int retries = 10;

	do {
		if (ocelot_can_inject(ocelot, 0))
			break;

		cpu_relax();
	} while (--retries);

	if (!retries) {
		dev_err(ocelot->dev, "port %d failed to inject skb\n",
			port);
		ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
		kfree_skb(skb);
		return;
	}

	ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);

	consume_skb(skb);
	kfree(xmit_work);
}

static int felix_connect_tag_protocol(struct dsa_switch *ds,
				      enum dsa_tag_protocol proto)
{
	struct ocelot_8021q_tagger_data *tagger_data;

	switch (proto) {
	case DSA_TAG_PROTO_OCELOT_8021Q:
		tagger_data = ocelot_8021q_tagger_data(ds);
		tagger_data->xmit_work_fn = felix_port_deferred_xmit;
		return 0;
	case DSA_TAG_PROTO_OCELOT:
	case DSA_TAG_PROTO_SEVILLE:
		return 0;
	default:
		return -EPROTONOSUPPORT;
	}
}

static int felix_setup(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_port *dp;
	int err;

	err = felix_init_structs(felix, ds->num_ports);
	if (err)
		return err;

	if (ocelot->targets[HSIO])
		ocelot_pll5_init(ocelot);

	err = ocelot_init(ocelot);
	if (err)
		goto out_mdiobus_free;

	if (ocelot->ptp) {
		err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
		if (err) {
			dev_err(ocelot->dev,
				"Timestamp initialization failed\n");
			ocelot->ptp = 0;
		}
	}

	dsa_switch_for_each_available_port(dp, ds) {
		ocelot_init_port(ocelot, dp->index);

		if (felix->info->configure_serdes)
			felix->info->configure_serdes(ocelot, dp->index,
						      dp->dn);

		/* Set the default QoS Classification based on PCP and DEI
		 * bits of vlan tag.
		 */
		felix_port_qos_map_init(ocelot, dp->index);
	}

	err = ocelot_devlink_sb_register(ocelot);
	if (err)
		goto out_deinit_ports;

	/* The initial tag protocol is NPI which won't fail during initial
	 * setup, there's no real point in checking for errors.
	 */
	felix_change_tag_protocol(ds, felix->tag_proto);

	ds->mtu_enforcement_ingress = true;
	ds->assisted_learning_on_cpu_port = true;
	ds->fdb_isolation = true;
	ds->max_num_bridges = ds->num_ports;

	return 0;

out_deinit_ports:
	dsa_switch_for_each_available_port(dp, ds)
		ocelot_deinit_port(ocelot, dp->index);

	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

out_mdiobus_free:
	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);

	return err;
}

static void felix_teardown(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_port *dp;

	if (felix->tag_proto_ops)
		felix->tag_proto_ops->teardown(ds);

	dsa_switch_for_each_available_port(dp, ds)
		ocelot_deinit_port(ocelot, dp->index);

	ocelot_devlink_sb_unregister(ocelot);
	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);
}

static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_hwstamp_get(ocelot, port, ifr);
}

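/* Changing the hardware timestamping configuration can install or remove PTP
 * traps, so refresh the destination of all traps (CPU port module vs
 * tag_8021q CPU port) afterwards.
 */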
static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	bool using_tag_8021q;
	int err;

	err = ocelot_hwstamp_set(ocelot, port, ifr);
	if (err)
		return err;

	using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;

	return felix_update_trapping_destinations(ds, using_tag_8021q);
}

static bool felix_check_xtr_pkt(struct ocelot *ocelot)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	int err = 0, grp = 0;

	if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return false;

	if (!felix->info->quirk_no_xtr_irq)
		return false;

	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
		struct sk_buff *skb;
		unsigned int type;

		err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
		if (err)
			goto out;

		/* We trap to the CPU port module all PTP frames, but
		 * felix_rxtstamp() only gets called for event frames.
		 * So we need to avoid sending duplicate general
		 * message frames by running a second BPF classifier
		 * here and dropping those.
		 */
		__skb_push(skb, ETH_HLEN);

		type = ptp_classify_raw(skb);

		__skb_pull(skb, ETH_HLEN);

		if (type == PTP_CLASS_NONE) {
			kfree_skb(skb);
			continue;
		}

		netif_rx(skb);
	}

out:
	if (err < 0) {
		dev_err_ratelimited(ocelot->dev,
				    "Error during packet extraction: %pe\n",
				    ERR_PTR(err));
		ocelot_drain_cpu_queue(ocelot, 0);
	}

	return true;
}

static bool felix_rxtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb, unsigned int type)
{
	u32 tstamp_lo = OCELOT_SKB_CB(skb)->tstamp_lo;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ocelot *ocelot = ds->priv;
	struct timespec64 ts;
	u32 tstamp_hi;
	u64 tstamp;

	switch (type & PTP_CLASS_PMASK) {
	case PTP_CLASS_L2:
		if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L2))
			return false;
		break;
	case PTP_CLASS_IPV4:
	case PTP_CLASS_IPV6:
		if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L4))
			return false;
		break;
	}

	/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
	 * for RX timestamping. Then free it, and poll for its copy through
	 * MMIO in the CPU port module, and inject that into the stack from
	 * ocelot_xtr_poll().
	 */
	if (felix_check_xtr_pkt(ocelot)) {
		kfree_skb(skb);
		return true;
	}

	ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
	tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);

	tstamp_hi = tstamp >> 32;
	if ((tstamp & 0xffffffff) < tstamp_lo)
		tstamp_hi--;

	tstamp = ((u64)tstamp_hi << 32) | tstamp_lo;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = tstamp;
	return false;
}

static void felix_txtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb)
{
	struct ocelot *ocelot = ds->priv;
	struct sk_buff *clone = NULL;

	if (!ocelot->ptp)
		return;

	if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
		dev_err_ratelimited(ds->dev,
				    "port %d delivering skb without TX timestamp\n",
				    port);
		return;
	}

	if (clone)
		OCELOT_SKB_CB(skb)->clone = clone;
}

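/* The maximum frame length factors into the guard band calculation done by
 * the time-aware shaper, so recompute the guard bands if a tc-taprio
 * schedule is installed on this port.
 */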
static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct ocelot *ocelot = ds->priv;
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	struct felix *felix = ocelot_to_felix(ocelot);

	ocelot_port_set_maxlen(ocelot, port, new_mtu);

	mutex_lock(&ocelot->tas_lock);

	if (ocelot_port->taprio && felix->info->tas_guard_bands_update)
		felix->info->tas_guard_bands_update(ocelot, port);

	mutex_unlock(&ocelot->tas_lock);

	return 0;
}

static int felix_get_max_mtu(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_max_mtu(ocelot, port);
}

static int felix_cls_flower_add(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	bool using_tag_8021q;
	int err;

	err = ocelot_cls_flower_replace(ocelot, port, cls, ingress);
	if (err)
		return err;

	using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;

	return felix_update_trapping_destinations(ds, using_tag_8021q);
}

static int felix_cls_flower_del(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_destroy(ocelot, port, cls, ingress);
}

static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
				  struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_stats(ocelot, port, cls, ingress);
}

static int felix_port_policer_add(struct dsa_switch *ds, int port,
				  struct dsa_mall_policer_tc_entry *policer)
{
	struct ocelot *ocelot = ds->priv;
	struct ocelot_policer pol = {
		.rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
		.burst = policer->burst,
	};

	return ocelot_port_policer_add(ocelot, port, &pol);
}

static void felix_port_policer_del(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_policer_del(ocelot, port);
}

static int felix_port_mirror_add(struct dsa_switch *ds, int port,
				 struct dsa_mall_mirror_tc_entry *mirror,
				 bool ingress, struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_mirror_add(ocelot, port, mirror->to_local_port,
				      ingress, extack);
}

static void felix_port_mirror_del(struct dsa_switch *ds, int port,
				  struct dsa_mall_mirror_tc_entry *mirror)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_mirror_del(ocelot, port, mirror->ingress);
}

static int felix_port_setup_tc(struct dsa_switch *ds, int port,
			       enum tc_setup_type type,
			       void *type_data)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->port_setup_tc)
		return felix->info->port_setup_tc(ds, port, type, type_data);
	else
		return -EOPNOTSUPP;
}


static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 *p_pool_index, u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
					  pool_type, p_pool_index,
					  p_threshold);
}

static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 pool_index, u32 threshold,
				     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
					  pool_type, pool_index, threshold,
					  extack);
}

static int felix_sb_occ_snapshot(struct dsa_switch *ds,
				 unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_snapshot(ocelot, sb_index);
}

static int felix_sb_occ_max_clear(struct dsa_switch *ds,
				  unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_max_clear(ocelot, sb_index);
}

static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
				      unsigned int sb_index, u16 pool_index,
				      u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
					   p_cur, p_max);
}

static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
					 unsigned int sb_index, u16 tc_index,
					 enum devlink_sb_pool_type pool_type,
					 u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index,
					      pool_type, p_cur, p_max);
}

static int felix_mrp_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_add(ocelot, port, mrp);
}

static int felix_mrp_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_del(ocelot, port, mrp);
}

static int
felix_mrp_add_ring_role(struct dsa_switch *ds, int port,
			const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_add_ring_role(ocelot, port, mrp);
}

static int
felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
			const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}

static int felix_port_get_default_prio(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_get_default_prio(ocelot, port);
}

static int felix_port_set_default_prio(struct dsa_switch *ds, int port,
					u8 prio)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_set_default_prio(ocelot, port, prio);
}

static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_get_dscp_prio(ocelot, port, dscp);
}

static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
				    u8 prio)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio);
}
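
/* Illustrative note, not part of the original driver sources: the
 * default-prio and dscp-prio handlers back the DCB APP table of each user
 * port. For example, a mapping added from user space roughly like
 *
 *   dcb app add dev swp0 dscp-prio 46:7
 *
 * (interface name and values purely illustrative) would reach the hardware
 * as ocelot_port_add_dscp_prio(ocelot, port, 46, 7), and the deletion
 * handler below undoes it.
 */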

static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
				    u8 prio)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio);
}

static int felix_get_mm(struct dsa_switch *ds, int port,
			struct ethtool_mm_state *state)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_get_mm(ocelot, port, state);
}

static int felix_set_mm(struct dsa_switch *ds, int port,
			struct ethtool_mm_cfg *cfg,
			struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_set_mm(ocelot, port, cfg, extack);
}

static void felix_get_mm_stats(struct dsa_switch *ds, int port,
			       struct ethtool_mm_stats *stats)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_get_mm_stats(ocelot, port, stats);
}
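
/* Illustrative note, not part of the original driver sources: every callback
 * wired up in the dsa_switch_ops table below is a thin adapter that
 * translates the DSA API (dsa_switch + port index) into the ocelot switch
 * library API (ocelot + port index). The Ocelot-compatible device drivers
 * built on top of this library register themselves with DSA using this
 * shared table.
 */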

const struct dsa_switch_ops felix_switch_ops = {
	.get_tag_protocol		= felix_get_tag_protocol,
	.change_tag_protocol		= felix_change_tag_protocol,
	.connect_tag_protocol		= felix_connect_tag_protocol,
	.setup				= felix_setup,
	.teardown			= felix_teardown,
	.set_ageing_time		= felix_set_ageing_time,
	.get_mm				= felix_get_mm,
	.set_mm				= felix_set_mm,
	.get_mm_stats			= felix_get_mm_stats,
	.get_stats64			= felix_get_stats64,
	.get_pause_stats		= felix_get_pause_stats,
	.get_rmon_stats			= felix_get_rmon_stats,
	.get_eth_ctrl_stats		= felix_get_eth_ctrl_stats,
	.get_eth_mac_stats		= felix_get_eth_mac_stats,
	.get_eth_phy_stats		= felix_get_eth_phy_stats,
	.get_strings			= felix_get_strings,
	.get_ethtool_stats		= felix_get_ethtool_stats,
	.get_sset_count			= felix_get_sset_count,
	.get_ts_info			= felix_get_ts_info,
	.phylink_get_caps		= felix_phylink_get_caps,
	.phylink_mac_config		= felix_phylink_mac_config,
	.phylink_mac_select_pcs		= felix_phylink_mac_select_pcs,
	.phylink_mac_link_down		= felix_phylink_mac_link_down,
	.phylink_mac_link_up		= felix_phylink_mac_link_up,
	.port_enable			= felix_port_enable,
	.port_fast_age			= felix_port_fast_age,
	.port_fdb_dump			= felix_fdb_dump,
	.port_fdb_add			= felix_fdb_add,
	.port_fdb_del			= felix_fdb_del,
	.lag_fdb_add			= felix_lag_fdb_add,
	.lag_fdb_del			= felix_lag_fdb_del,
	.port_mdb_add			= felix_mdb_add,
	.port_mdb_del			= felix_mdb_del,
	.port_pre_bridge_flags		= felix_pre_bridge_flags,
	.port_bridge_flags		= felix_bridge_flags,
	.port_bridge_join		= felix_bridge_join,
	.port_bridge_leave		= felix_bridge_leave,
	.port_lag_join			= felix_lag_join,
	.port_lag_leave			= felix_lag_leave,
	.port_lag_change		= felix_lag_change,
	.port_stp_state_set		= felix_bridge_stp_state_set,
	.port_vlan_filtering		= felix_vlan_filtering,
	.port_vlan_add			= felix_vlan_add,
	.port_vlan_del			= felix_vlan_del,
	.port_hwtstamp_get		= felix_hwtstamp_get,
	.port_hwtstamp_set		= felix_hwtstamp_set,
	.port_rxtstamp			= felix_rxtstamp,
	.port_txtstamp			= felix_txtstamp,
	.port_change_mtu		= felix_change_mtu,
	.port_max_mtu			= felix_get_max_mtu,
	.port_policer_add		= felix_port_policer_add,
	.port_policer_del		= felix_port_policer_del,
	.port_mirror_add		= felix_port_mirror_add,
	.port_mirror_del		= felix_port_mirror_del,
	.cls_flower_add			= felix_cls_flower_add,
	.cls_flower_del			= felix_cls_flower_del,
	.cls_flower_stats		= felix_cls_flower_stats,
	.port_setup_tc			= felix_port_setup_tc,
	.devlink_sb_pool_get		= felix_sb_pool_get,
	.devlink_sb_pool_set		= felix_sb_pool_set,
	.devlink_sb_port_pool_get	= felix_sb_port_pool_get,
	.devlink_sb_port_pool_set	= felix_sb_port_pool_set,
	.devlink_sb_tc_pool_bind_get	= felix_sb_tc_pool_bind_get,
	.devlink_sb_tc_pool_bind_set	= felix_sb_tc_pool_bind_set,
	.devlink_sb_occ_snapshot	= felix_sb_occ_snapshot,
	.devlink_sb_occ_max_clear	= felix_sb_occ_max_clear,
	.devlink_sb_occ_port_pool_get	= felix_sb_occ_port_pool_get,
	.devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get,
	.port_mrp_add			= felix_mrp_add,
	.port_mrp_del			= felix_mrp_del,
	.port_mrp_add_ring_role		= felix_mrp_add_ring_role,
	.port_mrp_del_ring_role		= felix_mrp_del_ring_role,
	.tag_8021q_vlan_add		= felix_tag_8021q_vlan_add,
	.tag_8021q_vlan_del		= felix_tag_8021q_vlan_del,
	.port_get_default_prio		= felix_port_get_default_prio,
	.port_set_default_prio		= felix_port_set_default_prio,
	.port_get_dscp_prio		= felix_port_get_dscp_prio,
	.port_add_dscp_prio		= felix_port_add_dscp_prio,
	.port_del_dscp_prio		= felix_port_del_dscp_prio,
	.port_set_host_flood		= felix_port_set_host_flood,
	.port_change_master		= felix_port_change_master,
};
EXPORT_SYMBOL_GPL(felix_switch_ops);

struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_switch *ds = felix->ds;

	if (!dsa_is_user_port(ds, port))
		return NULL;

	return dsa_to_port(ds, port)->slave;
}
EXPORT_SYMBOL_GPL(felix_port_to_netdev);

int felix_netdev_to_port(struct net_device *dev)
{
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(dev);
	if (IS_ERR(dp))
		return -EINVAL;

	return dp->index;
}
EXPORT_SYMBOL_GPL(felix_netdev_to_port);

MODULE_DESCRIPTION("Felix DSA library");
MODULE_LICENSE("GPL");