// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019-2021 NXP
 *
 * This is an umbrella module for all network switches that are
 * register-compatible with Ocelot and that perform I/O to their host CPU
 * through an NPI (Node Processor Interface) Ethernet port.
 */
#include <uapi/linux/if_bridge.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot.h>
#include <linux/dsa/8021q.h>
#include <linux/dsa/ocelot.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"

static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
				      bool pvid, bool untagged)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int key_length, upstream, err;

	/* We don't need to install the rxvlan into the other ports' filtering
	 * tables, because we're just pushing the rxvlan when sending towards
	 * the CPU
	 */
	if (!pvid)
		return 0;

	key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
	upstream = dsa_upstream_port(ds, port);

	outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
				     GFP_KERNEL);
	if (!outer_tagging_rule)
		return -ENOMEM;

	outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	outer_tagging_rule->prio = 1;
	outer_tagging_rule->id.cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port);
	outer_tagging_rule->id.tc_offload = false;
	outer_tagging_rule->block_id = VCAP_ES0;
	outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	outer_tagging_rule->lookup = 0;
	outer_tagging_rule->ingress_port.value = port;
	outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->egress_port.value = upstream;
	outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG;
	outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
	outer_tagging_rule->action.tag_a_vid_sel = 1;
	outer_tagging_rule->action.vid_a_val = vid;

	err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
	if (err)
		kfree(outer_tagging_rule);

	return err;
}

static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
				      bool pvid, bool untagged)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int upstream, err;

	/* tag_8021q.c assumes we are implementing this via port VLAN
	 * membership, which we aren't. So we don't need to add any VCAP filter
	 * for the CPU port.
	 */
	if (ocelot->ports[port]->is_dsa_8021q_cpu)
		return 0;

	untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!untagging_rule)
		return -ENOMEM;

	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!redirect_rule) {
		kfree(untagging_rule);
		return -ENOMEM;
	}

	upstream = dsa_upstream_port(ds, port);

	untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	untagging_rule->ingress_port_mask = BIT(upstream);
	untagging_rule->vlan.vid.value = vid;
	untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
	untagging_rule->prio = 1;
	untagging_rule->id.cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
	untagging_rule->id.tc_offload = false;
	untagging_rule->block_id = VCAP_IS1;
	untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	untagging_rule->lookup = 0;
	untagging_rule->action.vlan_pop_cnt_ena = true;
	untagging_rule->action.vlan_pop_cnt = 1;
	untagging_rule->action.pag_override_mask = 0xff;
	untagging_rule->action.pag_val = port;

	err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL);
	if (err) {
		kfree(untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
	redirect_rule->ingress_port_mask = BIT(upstream);
	redirect_rule->pag = port;
	redirect_rule->prio = 1;
	redirect_rule->id.cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
	redirect_rule->id.tc_offload = false;
	redirect_rule->block_id = VCAP_IS2;
	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	redirect_rule->lookup = 0;
	redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
	redirect_rule->action.port_mask = BIT(port);

	err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
	if (err) {
		ocelot_vcap_filter_del(ocelot, untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	return 0;
}

static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
				    u16 flags)
{
	bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
	struct ocelot *ocelot = ds->priv;

	if (vid_is_dsa_8021q_rxvlan(vid))
		return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot),
						  port, vid, pvid, untagged);

	if (vid_is_dsa_8021q_txvlan(vid))
		return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot),
						  port, vid, pvid, untagged);

	return 0;
}

static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot_vcap_block *block_vcap_es0;
	struct ocelot *ocelot = &felix->ocelot;

	block_vcap_es0 = &ocelot->block[VCAP_ES0];

	outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
								 port, false);
	/* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid
	 * installing outer tagging ES0 rules where they weren't needed.
	 * But in rxvlan_del, the API doesn't give us the "flags" anymore,
	 * so that forces us to be slightly sloppy here, and just assume that
	 * if we didn't find an outer_tagging_rule it means that there was
	 * none in the first place, i.e. rxvlan_del is called on a non-pvid
	 * port. This is most probably true though.
	 */
	if (!outer_tagging_rule)
		return 0;

	return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
}

static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	struct ocelot_vcap_block *block_vcap_is1;
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot *ocelot = &felix->ocelot;
	int err;

	if (ocelot->ports[port]->is_dsa_8021q_cpu)
		return 0;

	block_vcap_is1 = &ocelot->block[VCAP_IS1];
	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
							     port, false);
	if (!untagging_rule)
		return 0;

	err = ocelot_vcap_filter_del(ocelot, untagging_rule);
	if (err)
		return err;

	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
							    port, false);
	if (!redirect_rule)
		return 0;

	return ocelot_vcap_filter_del(ocelot, redirect_rule);
}

static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	if (vid_is_dsa_8021q_rxvlan(vid))
		return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot),
						  port, vid);

	if (vid_is_dsa_8021q_txvlan(vid))
		return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot),
						  port, vid);

	return 0;
}

/* As an alternative to using the NPI functionality, the same hardware MAC
 * connected internally to the enetc or fman DSA master can be configured to
 * use the software-defined tag_8021q frame format. As far as the hardware is
 * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
 * module are now disconnected from it, but can still be accessed through
 * register-based MMIO.
 */
static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
{
	mutex_lock(&ocelot->fwd_domain_lock);

	ocelot->ports[port]->is_dsa_8021q_cpu = true;
	ocelot->npi = -1;

	/* Overwrite PGID_CPU with the non-tagging port */
	ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);

	ocelot_apply_bridge_fwd_mask(ocelot, true);

	mutex_unlock(&ocelot->fwd_domain_lock);
}

static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
{
	mutex_lock(&ocelot->fwd_domain_lock);

	ocelot->ports[port]->is_dsa_8021q_cpu = false;

	/* Restore PGID_CPU */
	ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
			 PGID_CPU);

	ocelot_apply_bridge_fwd_mask(ocelot, true);

	mutex_unlock(&ocelot->fwd_domain_lock);
}

/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
 * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
 * tag_8021q CPU port.
 */
static int felix_setup_mmio_filtering(struct felix *felix)
{
	unsigned long user_ports = dsa_user_ports(felix->ds);
	struct ocelot_vcap_filter *redirect_rule;
	struct ocelot_vcap_filter *tagging_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	struct dsa_port *dp;
	int cpu = -1, ret;

	tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!tagging_rule)
		return -ENOMEM;

	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!redirect_rule) {
		kfree(tagging_rule);
		return -ENOMEM;
	}

	dsa_switch_for_each_cpu_port(dp, ds) {
		cpu = dp->index;
		break;
	}

	if (cpu < 0) {
		kfree(tagging_rule);
		kfree(redirect_rule);
		return -EINVAL;
	}

	tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
	*(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
	*(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
	tagging_rule->ingress_port_mask = user_ports;
	tagging_rule->prio = 1;
	tagging_rule->id.cookie = OCELOT_VCAP_IS1_TAG_8021Q_PTP_MMIO(ocelot);
	tagging_rule->id.tc_offload = false;
	tagging_rule->block_id = VCAP_IS1;
	tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	tagging_rule->lookup = 0;
	tagging_rule->action.pag_override_mask = 0xff;
	tagging_rule->action.pag_val = ocelot->num_phys_ports;

	ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
	if (ret) {
		kfree(tagging_rule);
		kfree(redirect_rule);
		return ret;
	}

	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
	redirect_rule->ingress_port_mask = user_ports;
	redirect_rule->pag = ocelot->num_phys_ports;
	redirect_rule->prio = 1;
	redirect_rule->id.cookie = OCELOT_VCAP_IS2_TAG_8021Q_PTP_MMIO(ocelot);
	redirect_rule->id.tc_offload = false;
	redirect_rule->block_id = VCAP_IS2;
	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	redirect_rule->lookup = 0;
	redirect_rule->action.cpu_copy_ena = true;
	if (felix->info->quirk_no_xtr_irq) {
		/* Redirect to the tag_8021q CPU but also copy PTP packets to
		 * the CPU port module
		 */
		redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
		redirect_rule->action.port_mask = BIT(cpu);
	} else {
		/* Trap PTP packets only to the CPU port module (which is
		 * redirected to the NPI port)
		 */
		redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
		redirect_rule->action.port_mask = 0;
	}

	ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
	if (ret) {
		ocelot_vcap_filter_del(ocelot, tagging_rule);
		kfree(redirect_rule);
		return ret;
	}

	/* The ownership of the CPU port module's queues might have just been
	 * transferred to the tag_8021q tagger from the NPI-based tagger.
	 * So there might still be all sorts of crap in the queues. On the
	 * other hand, the MMIO-based matching of PTP frames is very brittle,
	 * so we need to be careful that there are no extra frames to be
	 * dequeued over MMIO, since we would never know to discard them.
	 */
	ocelot_drain_cpu_queue(ocelot, 0);

	return 0;
}

static int felix_teardown_mmio_filtering(struct felix *felix)
{
	struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
	struct ocelot_vcap_block *block_vcap_is1;
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot *ocelot = &felix->ocelot;
	int err;

	block_vcap_is1 = &ocelot->block[VCAP_IS1];
	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
							   ocelot->num_phys_ports,
							   false);
	if (!tagging_rule)
		return -ENOENT;

	err = ocelot_vcap_filter_del(ocelot, tagging_rule);
	if (err)
		return err;

	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
							    ocelot->num_phys_ports,
							    false);
	if (!redirect_rule)
		return -ENOENT;

	return ocelot_vcap_filter_del(ocelot, redirect_rule);
}

static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	unsigned long cpu_flood;
	struct dsa_port *dp;
	int err;

	felix_8021q_cpu_port_init(ocelot, cpu);

	dsa_switch_for_each_available_port(dp, ds) {
		/* This overwrites ocelot_init():
		 * Do not forward BPDU frames to the CPU port module,
		 * for 2 reasons:
		 * - When these packets are injected from the tag_8021q
		 *   CPU port, we want them to go out, not loop back
		 *   into the system.
		 * - STP traffic ingressing on a user port should go to
		 *   the tag_8021q CPU port, not to the hardware CPU
		 *   port module.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
				 ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
	}

	/* In tag_8021q mode, the CPU port module is unused, except for PTP
	 * frames. So we want to disable flooding of any kind to the CPU port
	 * module, since packets going there will end in a black hole.
	 */
	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);

	err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
	if (err)
		return err;

	err = felix_setup_mmio_filtering(felix);
	if (err)
		goto out_tag_8021q_unregister;

	return 0;

out_tag_8021q_unregister:
	dsa_tag_8021q_unregister(ds);
	return err;
}

static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_port *dp;
	int err;

	err = felix_teardown_mmio_filtering(felix);
	if (err)
		dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d\n",
			err);

	dsa_tag_8021q_unregister(ds);

	dsa_switch_for_each_available_port(dp, ds) {
		/* Restore the logic from ocelot_init:
		 * do not forward BPDU frames to the front ports.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
				 ANA_PORT_CPU_FWD_BPDU_CFG,
				 dp->index);
	}

	felix_8021q_cpu_port_deinit(ocelot, cpu);
}

/* The CPU port module is connected to the Node Processor Interface (NPI). This
 * is the mode through which frames can be injected from and extracted to an
 * external CPU, over Ethernet.
 * In NXP SoCs, the "external CPU" is the ARM CPU running Linux, and this
 * forms a DSA setup together with the enetc or fman DSA master.
 */
static void felix_npi_port_init(struct ocelot *ocelot, int port)
{
	ocelot->npi = port;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
		     QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
		     QSYS_EXT_CPU_CFG);

	/* NPI port Injection/Extraction configuration */
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    ocelot->npi_xtr_prefix);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    ocelot->npi_inj_prefix);

	/* Disable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
}

static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
{
	/* Restore hardware defaults */
	int unused_port = ocelot->num_phys_ports + 2;

	ocelot->npi = -1;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
		     QSYS_EXT_CPU_CFG);

	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);

	/* Enable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
}

static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	unsigned long cpu_flood;

	felix_npi_port_init(ocelot, cpu);

	/* Include the CPU port module (and indirectly, the NPI port)
	 * in the forwarding mask for unknown unicast - the hardware
	 * default value for ANA_FLOODING_FLD_UNICAST excludes
	 * BIT(ocelot->num_phys_ports), and so does ocelot_init,
	 * since Ocelot relies on whitelisting MAC addresses towards
	 * PGID_CPU.
	 * We do this because DSA does not yet perform RX filtering,
	 * and the NPI port does not perform source address learning,
	 * so traffic sent to Linux is effectively unknown from the
	 * switch's perspective.
	 */
	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC);
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC);

	return 0;
}

static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;

	felix_npi_port_deinit(ocelot, cpu);
}

static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu,
				  enum dsa_tag_protocol proto)
{
	int err;

	switch (proto) {
	case DSA_TAG_PROTO_SEVILLE:
	case DSA_TAG_PROTO_OCELOT:
		err = felix_setup_tag_npi(ds, cpu);
		break;
	case DSA_TAG_PROTO_OCELOT_8021Q:
		err = felix_setup_tag_8021q(ds, cpu);
		break;
	default:
		err = -EPROTONOSUPPORT;
	}

	return err;
}

static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu,
				   enum dsa_tag_protocol proto)
{
	switch (proto) {
	case DSA_TAG_PROTO_SEVILLE:
	case DSA_TAG_PROTO_OCELOT:
		felix_teardown_tag_npi(ds, cpu);
		break;
	case DSA_TAG_PROTO_OCELOT_8021Q:
		felix_teardown_tag_8021q(ds, cpu);
		break;
	default:
		break;
	}
}
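
/* Note: the active tagging protocol can be switched at runtime through DSA's
 * "tagging" attribute in sysfs, which is generally only allowed while the DSA
 * master and the user ports are down. For example (the master interface name
 * "eno2" is only an illustration and is board specific):
 *
 *   echo ocelot-8021q > /sys/class/net/eno2/dsa/tagging
 *
 * ends up invoking felix_change_tag_protocol() below.
 */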

/* This always leaves the switch in a consistent state, because although the
 * tag_8021q setup can fail, the NPI setup can't. So either the change is made,
 * or the restoration is guaranteed to work.
 */
static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
				     enum dsa_tag_protocol proto)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	enum dsa_tag_protocol old_proto = felix->tag_proto;
	int err;

	if (proto != DSA_TAG_PROTO_SEVILLE &&
	    proto != DSA_TAG_PROTO_OCELOT &&
	    proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return -EPROTONOSUPPORT;

	felix_del_tag_protocol(ds, cpu, old_proto);

	err = felix_set_tag_protocol(ds, cpu, proto);
	if (err) {
		felix_set_tag_protocol(ds, cpu, old_proto);
		return err;
	}

	felix->tag_proto = proto;

	return 0;
}

static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
						    int port,
						    enum dsa_tag_protocol mp)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	return felix->tag_proto;
}

static int felix_set_ageing_time(struct dsa_switch *ds,
				 unsigned int ageing_time)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_set_ageing_time(ocelot, ageing_time);

	return 0;
}

static void felix_port_fast_age(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;
	int err;

	err = ocelot_mact_flush(ocelot, port);
	if (err)
		dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n",
			port, ERR_PTR(err));
}

static int felix_fdb_dump(struct dsa_switch *ds, int port,
			  dsa_fdb_dump_cb_t *cb, void *data)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_dump(ocelot, port, cb, data);
}

static int felix_fdb_add(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_add(ocelot, port, addr, vid);
}

static int felix_fdb_del(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_del(ocelot, port, addr, vid);
}

static int felix_mdb_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_mdb_add(ocelot, port, mdb);
}

static int felix_mdb_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_mdb_del(ocelot, port, mdb);
}

static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_bridge_stp_state_set(ocelot, port, state);
}

static int felix_pre_bridge_flags(struct dsa_switch *ds, int port,
				  struct switchdev_brport_flags val,
				  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_pre_bridge_flags(ocelot, port, val);
}

static int felix_bridge_flags(struct dsa_switch *ds, int port,
			      struct switchdev_brport_flags val,
			      struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_flags(ocelot, port, val);

	return 0;
}

static int felix_bridge_join(struct dsa_switch *ds, int port,
			     struct dsa_bridge bridge, bool *tx_fwd_offload)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_join(ocelot, port, bridge.dev);

	return 0;
}

static void felix_bridge_leave(struct dsa_switch *ds, int port,
			       struct dsa_bridge bridge)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_leave(ocelot, port, bridge.dev);
}

static int felix_lag_join(struct dsa_switch *ds, int port,
			  struct net_device *bond,
			  struct netdev_lag_upper_info *info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_lag_join(ocelot, port, bond, info);
}

static int felix_lag_leave(struct dsa_switch *ds, int port,
			   struct net_device *bond)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_lag_leave(ocelot, port, bond);

	return 0;
}

static int felix_lag_change(struct dsa_switch *ds, int port)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct ocelot *ocelot = ds->priv;

	ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled);

	return 0;
}

static int felix_vlan_prepare(struct dsa_switch *ds, int port,
			      const struct switchdev_obj_port_vlan *vlan,
			      struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	u16 flags = vlan->flags;

	/* Ocelot switches copy frames as-is to the CPU, so the flags:
	 * egress-untagged or not, pvid or not, make no difference. This
	 * behavior is already better than what DSA just tries to approximate
	 * when it installs the VLAN with the same flags on the CPU port.
	 * Just accept any configuration, and don't let ocelot deny installing
	 * multiple native VLANs on the NPI port, because the switch doesn't
	 * look at the port tag settings towards the NPI interface anyway.
	 */
	if (port == ocelot->npi)
		return 0;

	return ocelot_vlan_prepare(ocelot, port, vlan->vid,
				   flags & BRIDGE_VLAN_INFO_PVID,
				   flags & BRIDGE_VLAN_INFO_UNTAGGED,
				   extack);
}

static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
				struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
}

static int felix_vlan_add(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	u16 flags = vlan->flags;
	int err;

	err = felix_vlan_prepare(ds, port, vlan, extack);
	if (err)
		return err;

	return ocelot_vlan_add(ocelot, port, vlan->vid,
			       flags & BRIDGE_VLAN_INFO_PVID,
			       flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

static int felix_vlan_del(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_vlan_del(ocelot, port, vlan->vid);
}

static void felix_phylink_validate(struct dsa_switch *ds, int port,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->phylink_validate)
		felix->info->phylink_validate(ocelot, port, supported, state);
}

static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
				     unsigned int link_an_mode,
				     const struct phylink_link_state *state)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (felix->pcs && felix->pcs[port])
		phylink_set_pcs(dp->pl, felix->pcs[port]);
}

static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
					unsigned int link_an_mode,
					phy_interface_t interface)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
				     FELIX_MAC_QUIRKS);
}

static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int link_an_mode,
				      phy_interface_t interface,
				      struct phy_device *phydev,
				      int speed, int duplex,
				      bool tx_pause, bool rx_pause)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
				   interface, speed, duplex, tx_pause, rx_pause,
				   FELIX_MAC_QUIRKS);

	if (felix->info->port_sched_speed_set)
		felix->info->port_sched_speed_set(ocelot, port, speed);
}

static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
	int i;

	ocelot_rmw_gix(ocelot,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG,
		       port);

	for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
		ocelot_rmw_ix(ocelot,
			      (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
			      ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M,
			      ANA_PORT_PCP_DEI_MAP,
			      port, i);
	}
}

static void felix_get_strings(struct dsa_switch *ds, int port,
			      u32 stringset, u8 *data)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_strings(ocelot, port, stringset, data);
}

static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_get_ethtool_stats(ocelot, port, data);
}

static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_sset_count(ocelot, port, sset);
}

static int felix_get_ts_info(struct dsa_switch *ds, int port,
			     struct ethtool_ts_info *info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_ts_info(ocelot, port, info);
}

static int felix_parse_ports_node(struct felix *felix,
				  struct device_node *ports_node,
				  phy_interface_t *port_phy_modes)
{
	struct ocelot *ocelot = &felix->ocelot;
	struct device *dev = felix->ocelot.dev;
	struct device_node *child;

	for_each_available_child_of_node(ports_node, child) {
		phy_interface_t phy_mode;
		u32 port;
		int err;

		/* Get switch port number from DT */
		if (of_property_read_u32(child, "reg", &port) < 0) {
			dev_err(dev, "Port number not defined in device tree "
				"(property \"reg\")\n");
			of_node_put(child);
			return -ENODEV;
		}

		/* Get PHY mode from DT */
		err = of_get_phy_mode(child, &phy_mode);
		if (err) {
			dev_err(dev, "Failed to read phy-mode or "
				"phy-interface-type property for port %d\n",
				port);
			of_node_put(child);
			return -ENODEV;
		}

		err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode);
		if (err < 0) {
			dev_err(dev, "Unsupported PHY mode %s on port %d\n",
				phy_modes(phy_mode), port);
			of_node_put(child);
			return err;
		}

		port_phy_modes[port] = phy_mode;
	}

	return 0;
}

static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
{
	struct device *dev = felix->ocelot.dev;
	struct device_node *switch_node;
	struct device_node *ports_node;
	int err;

	switch_node = dev->of_node;

	ports_node = of_get_child_by_name(switch_node, "ports");
	if (!ports_node)
		ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
	if (!ports_node) {
		dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n");
		return -ENODEV;
	}

	err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
	of_node_put(ports_node);

	return err;
}

static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
	struct ocelot *ocelot = &felix->ocelot;
	phy_interface_t *port_phy_modes;
	struct resource res;
	int port, i, err;

	ocelot->num_phys_ports = num_phys_ports;
	ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
				     sizeof(struct ocelot_port *), GFP_KERNEL);
	if (!ocelot->ports)
		return -ENOMEM;

	ocelot->map = felix->info->map;
	ocelot->stats_layout = felix->info->stats_layout;
	ocelot->num_stats = felix->info->num_stats;
	ocelot->num_mact_rows = felix->info->num_mact_rows;
	ocelot->vcap = felix->info->vcap;
	ocelot->vcap_pol.base = felix->info->vcap_pol_base;
	ocelot->vcap_pol.max = felix->info->vcap_pol_max;
	ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2;
	ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2;
	ocelot->ops = felix->info->ops;
	ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->devlink = felix->ds->devlink;

	port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
				 GFP_KERNEL);
	if (!port_phy_modes)
		return -ENOMEM;

	err = felix_parse_dt(felix, port_phy_modes);
	if (err) {
		kfree(port_phy_modes);
		return err;
	}

	for (i = 0; i < TARGET_MAX; i++) {
		struct regmap *target;

		if (!felix->info->target_io_res[i].name)
			continue;

		memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
		res.flags = IORESOURCE_MEM;
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		target = felix->info->init_regmap(ocelot, &res);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map device memory space\n");
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot->targets[i] = target;
	}

	err = ocelot_regfields_init(ocelot, felix->info->regfields);
	if (err) {
		dev_err(ocelot->dev, "failed to init reg fields map\n");
		kfree(port_phy_modes);
		return err;
	}

	for (port = 0; port < num_phys_ports; port++) {
		struct ocelot_port *ocelot_port;
		struct regmap *target;

		ocelot_port = devm_kzalloc(ocelot->dev,
					   sizeof(struct ocelot_port),
					   GFP_KERNEL);
		if (!ocelot_port) {
			dev_err(ocelot->dev,
				"failed to allocate port memory\n");
			kfree(port_phy_modes);
			return -ENOMEM;
		}

		memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
		res.flags = IORESOURCE_MEM;
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		target = felix->info->init_regmap(ocelot, &res);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map memory space for port %d\n",
				port);
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot_port->phy_mode = port_phy_modes[port];
		ocelot_port->ocelot = ocelot;
		ocelot_port->target = target;
		ocelot->ports[port] = ocelot_port;
	}

	kfree(port_phy_modes);

	if (felix->info->mdio_bus_alloc) {
		err = felix->info->mdio_bus_alloc(ocelot);
		if (err < 0)
			return err;
	}

	return 0;
}

static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
					   struct sk_buff *skb)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
	struct sk_buff *skb_match = NULL, *skb_tmp;
	unsigned long flags;

	if (!clone)
		return;

	spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);

	skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
		if (skb != clone)
			continue;
		__skb_unlink(skb, &ocelot_port->tx_skbs);
		skb_match = skb;
		break;
	}

	spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);

	WARN_ONCE(!skb_match,
		  "Could not find skb clone in TX timestamping list\n");
}

#define work_to_xmit_work(w) \
		container_of((w), struct felix_deferred_xmit_work, work)

static void felix_port_deferred_xmit(struct kthread_work *work)
{
	struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
	struct dsa_switch *ds = xmit_work->dp->ds;
	struct sk_buff *skb = xmit_work->skb;
	u32 rew_op = ocelot_ptp_rew_op(skb);
	struct ocelot *ocelot = ds->priv;
	int port = xmit_work->dp->index;
	int retries = 10;

	do {
		if (ocelot_can_inject(ocelot, 0))
			break;

		cpu_relax();
	} while (--retries);

	if (!retries) {
		dev_err(ocelot->dev, "port %d failed to inject skb\n",
			port);
		ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
		kfree_skb(skb);
		return;
	}

	ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);

	consume_skb(skb);
	kfree(xmit_work);
}

static int felix_connect_tag_protocol(struct dsa_switch *ds,
				      enum dsa_tag_protocol proto)
{
	struct ocelot_8021q_tagger_data *tagger_data;

	switch (proto) {
	case DSA_TAG_PROTO_OCELOT_8021Q:
		tagger_data = ocelot_8021q_tagger_data(ds);
		tagger_data->xmit_work_fn = felix_port_deferred_xmit;
		return 0;
	case DSA_TAG_PROTO_OCELOT:
	case DSA_TAG_PROTO_SEVILLE:
		return 0;
	default:
		return -EPROTONOSUPPORT;
	}
}

/* Hardware initialization done here so that we can allocate structures with
 * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
 * us to allocate structures twice (leak memory) and map PCI memory twice
 * (which will not work).
 */
static int felix_setup(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_port *dp;
	int err;

	err = felix_init_structs(felix, ds->num_ports);
	if (err)
		return err;

	err = ocelot_init(ocelot);
	if (err)
		goto out_mdiobus_free;

	if (ocelot->ptp) {
		err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
		if (err) {
			dev_err(ocelot->dev,
				"Timestamp initialization failed\n");
			ocelot->ptp = 0;
		}
	}

	dsa_switch_for_each_available_port(dp, ds) {
		ocelot_init_port(ocelot, dp->index);

		/* Set the default QoS Classification based on PCP and DEI
		 * bits of vlan tag.
		 */
		felix_port_qos_map_init(ocelot, dp->index);
	}

	err = ocelot_devlink_sb_register(ocelot);
	if (err)
		goto out_deinit_ports;

	dsa_switch_for_each_cpu_port(dp, ds) {
		/* The initial tag protocol is NPI which always returns 0, so
		 * there's no real point in checking for errors.
		 */
		felix_set_tag_protocol(ds, dp->index, felix->tag_proto);
		break;
	}

	ds->mtu_enforcement_ingress = true;
	ds->assisted_learning_on_cpu_port = true;

	return 0;

out_deinit_ports:
	dsa_switch_for_each_available_port(dp, ds)
		ocelot_deinit_port(ocelot, dp->index);

	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

out_mdiobus_free:
	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);

	return err;
}

static void felix_teardown(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_port *dp;

	dsa_switch_for_each_cpu_port(dp, ds) {
		felix_del_tag_protocol(ds, dp->index, felix->tag_proto);
		break;
	}

	dsa_switch_for_each_available_port(dp, ds)
		ocelot_deinit_port(ocelot, dp->index);

	ocelot_devlink_sb_unregister(ocelot);
	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);
}

static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_hwstamp_get(ocelot, port, ifr);
}

static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_hwstamp_set(ocelot, port, ifr);
}

static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	int err = 0, grp = 0;

	if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return false;

	if (!felix->info->quirk_no_xtr_irq)
		return false;

	if (ptp_type == PTP_CLASS_NONE)
		return false;

	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
		struct sk_buff *skb;
		unsigned int type;

		err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
		if (err)
			goto out;

		/* We trap to the CPU port module all PTP frames, but
		 * felix_rxtstamp() only gets called for event frames.
		 * So we need to avoid sending duplicate general
		 * message frames by running a second BPF classifier
		 * here and dropping those.
		 */
		__skb_push(skb, ETH_HLEN);

		type = ptp_classify_raw(skb);

		__skb_pull(skb, ETH_HLEN);

		if (type == PTP_CLASS_NONE) {
			kfree_skb(skb);
			continue;
		}

		netif_rx(skb);
	}

out:
	if (err < 0)
		ocelot_drain_cpu_queue(ocelot, 0);

	return true;
}

static bool felix_rxtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb, unsigned int type)
{
	u32 tstamp_lo = OCELOT_SKB_CB(skb)->tstamp_lo;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ocelot *ocelot = ds->priv;
	struct timespec64 ts;
	u32 tstamp_hi;
	u64 tstamp;

	/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
	 * for RX timestamping.
	 * Then free it, and poll for its copy through MMIO in the CPU port
	 * module, and inject that into the stack from ocelot_xtr_poll().
	 */
	if (felix_check_xtr_pkt(ocelot, type)) {
		kfree_skb(skb);
		return true;
	}

	ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
	tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);

	tstamp_hi = tstamp >> 32;
	if ((tstamp & 0xffffffff) < tstamp_lo)
		tstamp_hi--;

	tstamp = ((u64)tstamp_hi << 32) | tstamp_lo;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = tstamp;
	return false;
}

static void felix_txtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb)
{
	struct ocelot *ocelot = ds->priv;
	struct sk_buff *clone = NULL;

	if (!ocelot->ptp)
		return;

	if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
		dev_err_ratelimited(ds->dev,
				    "port %d delivering skb without TX timestamp\n",
				    port);
		return;
	}

	if (clone)
		OCELOT_SKB_CB(skb)->clone = clone;
}

static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_set_maxlen(ocelot, port, new_mtu);

	return 0;
}

static int felix_get_max_mtu(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_max_mtu(ocelot, port);
}

static int felix_cls_flower_add(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
}

static int felix_cls_flower_del(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_destroy(ocelot, port, cls, ingress);
}

static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
				  struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_stats(ocelot, port, cls, ingress);
}

static int felix_port_policer_add(struct dsa_switch *ds, int port,
				  struct dsa_mall_policer_tc_entry *policer)
{
	struct ocelot *ocelot = ds->priv;
	struct ocelot_policer pol = {
		.rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
		.burst = policer->burst,
	};

	return ocelot_port_policer_add(ocelot, port, &pol);
}

static void felix_port_policer_del(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_policer_del(ocelot, port);
}

static int felix_port_setup_tc(struct dsa_switch *ds, int port,
			       enum tc_setup_type type,
			       void *type_data)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->port_setup_tc)
		return felix->info->port_setup_tc(ds, port, type, type_data);
	else
		return -EOPNOTSUPP;
}

static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index,
			     struct devlink_sb_pool_info *pool_info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
}

static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index, u32 size,
			     enum devlink_sb_threshold_type threshold_type,
			     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
				  threshold_type, extack);
}

static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
				       p_threshold);
}

static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 threshold, struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
				       threshold, extack);
}

static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 *p_pool_index, u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
					  pool_type, p_pool_index,
					  p_threshold);
}

static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 pool_index, u32 threshold,
				     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
					  pool_type, pool_index, threshold,
					  extack);
}

static int felix_sb_occ_snapshot(struct dsa_switch *ds,
				 unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_snapshot(ocelot, sb_index);
}

static int felix_sb_occ_max_clear(struct dsa_switch *ds,
				  unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_max_clear(ocelot, sb_index);
}

static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
				      unsigned int sb_index, u16 pool_index,
				      u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
					   p_cur, p_max);
}

static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
					  unsigned int sb_index, u16 tc_index,
					  enum devlink_sb_pool_type pool_type,
					  u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index,
					      pool_type, p_cur, p_max);
}

static int felix_mrp_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_add(ocelot, port, mrp);
}

static int felix_mrp_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_del(ocelot, port, mrp);
}

static int
felix_mrp_add_ring_role(struct dsa_switch *ds, int port,
			const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_add_ring_role(ocelot, port, mrp);
}

static int
felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
			const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}

const struct dsa_switch_ops felix_switch_ops = {
	.get_tag_protocol		= felix_get_tag_protocol,
	.change_tag_protocol		= felix_change_tag_protocol,
	.connect_tag_protocol		= felix_connect_tag_protocol,
	.setup				= felix_setup,
	.teardown			= felix_teardown,
	.set_ageing_time		= felix_set_ageing_time,
	.get_strings			= felix_get_strings,
	.get_ethtool_stats		= felix_get_ethtool_stats,
	.get_sset_count			= felix_get_sset_count,
	.get_ts_info			= felix_get_ts_info,
	.phylink_validate		= felix_phylink_validate,
	.phylink_mac_config		= felix_phylink_mac_config,
	.phylink_mac_link_down		= felix_phylink_mac_link_down,
	.phylink_mac_link_up		= felix_phylink_mac_link_up,
	.port_fast_age			= felix_port_fast_age,
	.port_fdb_dump			= felix_fdb_dump,
	.port_fdb_add			= felix_fdb_add,
	.port_fdb_del			= felix_fdb_del,
	.port_mdb_add			= felix_mdb_add,
	.port_mdb_del			= felix_mdb_del,
	.port_pre_bridge_flags		= felix_pre_bridge_flags,
	.port_bridge_flags		= felix_bridge_flags,
	.port_bridge_join		= felix_bridge_join,
	.port_bridge_leave		= felix_bridge_leave,
	.port_lag_join			= felix_lag_join,
	.port_lag_leave			= felix_lag_leave,
	.port_lag_change		= felix_lag_change,
	.port_stp_state_set		= felix_bridge_stp_state_set,
	.port_vlan_filtering		= felix_vlan_filtering,
	.port_vlan_add			= felix_vlan_add,
	.port_vlan_del			= felix_vlan_del,
	.port_hwtstamp_get		= felix_hwtstamp_get,
	.port_hwtstamp_set		= felix_hwtstamp_set,
	.port_rxtstamp			= felix_rxtstamp,
	.port_txtstamp			= felix_txtstamp,
	.port_change_mtu		= felix_change_mtu,
	.port_max_mtu			= felix_get_max_mtu,
	.port_policer_add		= felix_port_policer_add,
	.port_policer_del		= felix_port_policer_del,
	.cls_flower_add			= felix_cls_flower_add,
	.cls_flower_del			= felix_cls_flower_del,
	.cls_flower_stats		= felix_cls_flower_stats,
	.port_setup_tc			= felix_port_setup_tc,
	.devlink_sb_pool_get		= felix_sb_pool_get,
	.devlink_sb_pool_set		= felix_sb_pool_set,
	.devlink_sb_port_pool_get	= felix_sb_port_pool_get,
	.devlink_sb_port_pool_set	= felix_sb_port_pool_set,
	.devlink_sb_tc_pool_bind_get	= felix_sb_tc_pool_bind_get,
	.devlink_sb_tc_pool_bind_set	= felix_sb_tc_pool_bind_set,
	.devlink_sb_occ_snapshot	= felix_sb_occ_snapshot,
	.devlink_sb_occ_max_clear	= felix_sb_occ_max_clear,
	.devlink_sb_occ_port_pool_get	= felix_sb_occ_port_pool_get,
	.devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get,
	.port_mrp_add			= felix_mrp_add,
	.port_mrp_del			= felix_mrp_del,
	.port_mrp_add_ring_role		= felix_mrp_add_ring_role,
	.port_mrp_del_ring_role		= felix_mrp_del_ring_role,
	.tag_8021q_vlan_add		= felix_tag_8021q_vlan_add,
	.tag_8021q_vlan_del		= felix_tag_8021q_vlan_del,
};

struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_switch *ds = felix->ds;

	if (!dsa_is_user_port(ds, port))
		return NULL;

	return dsa_to_port(ds, port)->slave;
}

int felix_netdev_to_port(struct net_device *dev)
{
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(dev);
	if (IS_ERR(dp))
		return -EINVAL;

	return dp->index;
}