// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019-2021 NXP
 *
 * This is an umbrella module for all network switches that are
 * register-compatible with Ocelot and that perform I/O to their host CPU
 * through an NPI (Node Processor Interface) Ethernet port.
 */
#include <uapi/linux/if_bridge.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot.h>
#include <linux/dsa/8021q.h>
#include <linux/dsa/ocelot.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"

static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
				      bool pvid, bool untagged)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int key_length, upstream, err;

	/* We don't need to install the rxvlan into the other ports' filtering
	 * tables, because we're just pushing the rxvlan when sending towards
	 * the CPU
	 */
	if (!pvid)
		return 0;

	key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
	upstream = dsa_upstream_port(ds, port);

	outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
				     GFP_KERNEL);
	if (!outer_tagging_rule)
		return -ENOMEM;

	outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	outer_tagging_rule->prio = 1;
	outer_tagging_rule->id.cookie = port;
	outer_tagging_rule->id.tc_offload = false;
	outer_tagging_rule->block_id = VCAP_ES0;
	outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	outer_tagging_rule->lookup = 0;
	outer_tagging_rule->ingress_port.value = port;
	outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->egress_port.value = upstream;
	outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG;
	outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
	outer_tagging_rule->action.tag_a_vid_sel = 1;
	outer_tagging_rule->action.vid_a_val = vid;

	err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
	if (err)
		kfree(outer_tagging_rule);

	return err;
}
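/* Rough illustration of the effect of the ES0 rule above, assuming the
 * standard ocelot-8021q tagger on the DSA master: a frame received on user
 * port N and forwarded to the upstream port leaves the switch with an extra
 * outer 802.1AD tag carrying the rx VLAN of port N, roughly:
 *
 *   DA | SA | 0x88a8 | rxvlan(N) | original frame
 *
 * The tagger is then expected to pop that tag and use the VID to steer the
 * frame to the correct user net device. The frame layout shown here is only
 * illustrative, not a byte-accurate description.
 */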
static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
				      bool pvid, bool untagged)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int upstream, err;

	/* tag_8021q.c assumes we are implementing this via port VLAN
	 * membership, which we aren't. So we don't need to add any VCAP filter
	 * for the CPU port.
	 */
	if (ocelot->ports[port]->is_dsa_8021q_cpu)
		return 0;

	untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!untagging_rule)
		return -ENOMEM;

	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!redirect_rule) {
		kfree(untagging_rule);
		return -ENOMEM;
	}

	upstream = dsa_upstream_port(ds, port);

	untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	untagging_rule->ingress_port_mask = BIT(upstream);
	untagging_rule->vlan.vid.value = vid;
	untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
	untagging_rule->prio = 1;
	untagging_rule->id.cookie = port;
	untagging_rule->id.tc_offload = false;
	untagging_rule->block_id = VCAP_IS1;
	untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	untagging_rule->lookup = 0;
	untagging_rule->action.vlan_pop_cnt_ena = true;
	untagging_rule->action.vlan_pop_cnt = 1;
	untagging_rule->action.pag_override_mask = 0xff;
	untagging_rule->action.pag_val = port;

	err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL);
	if (err) {
		kfree(untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
	redirect_rule->ingress_port_mask = BIT(upstream);
	redirect_rule->pag = port;
	redirect_rule->prio = 1;
	redirect_rule->id.cookie = port;
	redirect_rule->id.tc_offload = false;
	redirect_rule->block_id = VCAP_IS2;
	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	redirect_rule->lookup = 0;
	redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
	redirect_rule->action.port_mask = BIT(port);

	err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
	if (err) {
		ocelot_vcap_filter_del(ocelot, untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	return 0;
}

static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
				    u16 flags)
{
	bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
	struct ocelot *ocelot = ds->priv;

	if (vid_is_dsa_8021q_rxvlan(vid))
		return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot),
						  port, vid, pvid, untagged);

	if (vid_is_dsa_8021q_txvlan(vid))
		return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot),
						  port, vid, pvid, untagged);

	return 0;
}
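/* Loosely, the TX VLAN handling set up above works in two chained stages on
 * the upstream port: the IS1 rule matches the tx VLAN pushed by the tagger,
 * pops it, and overrides the PAG (policy association group) with the target
 * port number; the chained IS2 rule then matches on that PAG and redirects
 * the frame to the intended egress port. This is a reading of the code above
 * rather than a statement taken from the hardware documentation.
 */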
static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot_vcap_block *block_vcap_es0;
	struct ocelot *ocelot = &felix->ocelot;

	block_vcap_es0 = &ocelot->block[VCAP_ES0];

	outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
								 port, false);
	/* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid
	 * installing outer tagging ES0 rules where they weren't needed.
	 * But in rxvlan_del, the API doesn't give us the "flags" anymore,
	 * so that forces us to be slightly sloppy here, and just assume that
	 * if we didn't find an outer_tagging_rule it means that there was
	 * none in the first place, i.e. rxvlan_del is called on a non-pvid
	 * port. This is most probably true though.
	 */
	if (!outer_tagging_rule)
		return 0;

	return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
}

static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	struct ocelot_vcap_block *block_vcap_is1;
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot *ocelot = &felix->ocelot;
	int err;

	if (ocelot->ports[port]->is_dsa_8021q_cpu)
		return 0;

	block_vcap_is1 = &ocelot->block[VCAP_IS1];
	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
							     port, false);
	if (!untagging_rule)
		return 0;

	err = ocelot_vcap_filter_del(ocelot, untagging_rule);
	if (err)
		return err;

	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
							    port, false);
	if (!redirect_rule)
		return 0;

	return ocelot_vcap_filter_del(ocelot, redirect_rule);
}

static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	if (vid_is_dsa_8021q_rxvlan(vid))
		return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot),
						  port, vid);

	if (vid_is_dsa_8021q_txvlan(vid))
		return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot),
						  port, vid);

	return 0;
}

/* As an alternative to using the NPI functionality, the same hardware MAC
 * connected internally to the enetc or fman DSA master can be configured to
 * use the software-defined tag_8021q frame format. As far as the hardware is
 * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
 * module are now disconnected from it, but can still be accessed through
 * register-based MMIO.
 */
static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
{
	ocelot->ports[port]->is_dsa_8021q_cpu = true;
	ocelot->npi = -1;

	/* Overwrite PGID_CPU with the non-tagging port */
	ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);

	ocelot_apply_bridge_fwd_mask(ocelot);
}

static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
{
	ocelot->ports[port]->is_dsa_8021q_cpu = false;

	/* Restore PGID_CPU */
	ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
			 PGID_CPU);

	ocelot_apply_bridge_fwd_mask(ocelot);
}
/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
 * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
 * tag_8021q CPU port.
 */
static int felix_setup_mmio_filtering(struct felix *felix)
{
	unsigned long user_ports = dsa_user_ports(felix->ds);
	struct ocelot_vcap_filter *redirect_rule;
	struct ocelot_vcap_filter *tagging_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int cpu = -1, port, ret;

	tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!tagging_rule)
		return -ENOMEM;

	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!redirect_rule) {
		kfree(tagging_rule);
		return -ENOMEM;
	}

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_cpu_port(ds, port)) {
			cpu = port;
			break;
		}
	}

	if (cpu < 0) {
		kfree(tagging_rule);
		kfree(redirect_rule);
		return -EINVAL;
	}

	tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
	*(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
	*(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
	tagging_rule->ingress_port_mask = user_ports;
	tagging_rule->prio = 1;
	tagging_rule->id.cookie = ocelot->num_phys_ports;
	tagging_rule->id.tc_offload = false;
	tagging_rule->block_id = VCAP_IS1;
	tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	tagging_rule->lookup = 0;
	tagging_rule->action.pag_override_mask = 0xff;
	tagging_rule->action.pag_val = ocelot->num_phys_ports;

	ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
	if (ret) {
		kfree(tagging_rule);
		kfree(redirect_rule);
		return ret;
	}

	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
	redirect_rule->ingress_port_mask = user_ports;
	redirect_rule->pag = ocelot->num_phys_ports;
	redirect_rule->prio = 1;
	redirect_rule->id.cookie = ocelot->num_phys_ports;
	redirect_rule->id.tc_offload = false;
	redirect_rule->block_id = VCAP_IS2;
	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	redirect_rule->lookup = 0;
	redirect_rule->action.cpu_copy_ena = true;
	if (felix->info->quirk_no_xtr_irq) {
		/* Redirect to the tag_8021q CPU but also copy PTP packets to
		 * the CPU port module
		 */
		redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
		redirect_rule->action.port_mask = BIT(cpu);
	} else {
		/* Trap PTP packets only to the CPU port module (which is
		 * redirected to the NPI port)
		 */
		redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
		redirect_rule->action.port_mask = 0;
	}

	ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
	if (ret) {
		ocelot_vcap_filter_del(ocelot, tagging_rule);
		kfree(redirect_rule);
		return ret;
	}

	/* The ownership of the CPU port module's queues might have just been
	 * transferred to the tag_8021q tagger from the NPI-based tagger.
	 * So there might still be all sorts of crap in the queues. On the
	 * other hand, the MMIO-based matching of PTP frames is very brittle,
	 * so we need to be careful that there are no extra frames to be
	 * dequeued over MMIO, since we would never know to discard them.
	 */
	ocelot_drain_cpu_queue(ocelot, 0);

	return 0;
}
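/* Note on rule identification: the per-port tag_8021q rules above use
 * id.cookie = port, so the PTP tagging/redirect rules here use
 * id.cookie = ocelot->num_phys_ports, a value that can never collide with a
 * valid port index. That is how felix_teardown_mmio_filtering() finds them
 * again via ocelot_vcap_block_find_filter_by_id(). This is an observation
 * about the code in this file, not documented hardware behavior.
 */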
static int felix_teardown_mmio_filtering(struct felix *felix)
{
	struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
	struct ocelot_vcap_block *block_vcap_is1;
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot *ocelot = &felix->ocelot;
	int err;

	block_vcap_is1 = &ocelot->block[VCAP_IS1];
	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
							   ocelot->num_phys_ports,
							   false);
	if (!tagging_rule)
		return -ENOENT;

	err = ocelot_vcap_filter_del(ocelot, tagging_rule);
	if (err)
		return err;

	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
							    ocelot->num_phys_ports,
							    false);
	if (!redirect_rule)
		return -ENOENT;

	return ocelot_vcap_filter_del(ocelot, redirect_rule);
}

static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	unsigned long cpu_flood;
	int port, err;

	felix_8021q_cpu_port_init(ocelot, cpu);

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		/* This overwrites ocelot_init():
		 * Do not forward BPDU frames to the CPU port module,
		 * for 2 reasons:
		 * - When these packets are injected from the tag_8021q
		 *   CPU port, we want them to go out, not loop back
		 *   into the system.
		 * - STP traffic ingressing on a user port should go to
		 *   the tag_8021q CPU port, not to the hardware CPU
		 *   port module.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
				 ANA_PORT_CPU_FWD_BPDU_CFG, port);
	}

	/* In tag_8021q mode, the CPU port module is unused, except for PTP
	 * frames. So we want to disable flooding of any kind to the CPU port
	 * module, since packets going there will end in a black hole.
	 */
	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);

	err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
	if (err)
		return err;

	err = felix_setup_mmio_filtering(felix);
	if (err)
		goto out_tag_8021q_unregister;

	return 0;

out_tag_8021q_unregister:
	dsa_tag_8021q_unregister(ds);
	return err;
}

static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int err, port;

	err = felix_teardown_mmio_filtering(felix);
	if (err)
		dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
			err);

	dsa_tag_8021q_unregister(ds);

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		/* Restore the logic from ocelot_init:
		 * do not forward BPDU frames to the front ports.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
				 ANA_PORT_CPU_FWD_BPDU_CFG,
				 port);
	}

	felix_8021q_cpu_port_deinit(ocelot, cpu);
}
/* The CPU port module is connected to the Node Processor Interface (NPI). This
 * is the mode through which frames can be injected from and extracted to an
 * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
 * running Linux, and this forms a DSA setup together with the enetc or fman
 * DSA master.
 */
static void felix_npi_port_init(struct ocelot *ocelot, int port)
{
	ocelot->npi = port;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
		     QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
		     QSYS_EXT_CPU_CFG);

	/* NPI port Injection/Extraction configuration */
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    ocelot->npi_xtr_prefix);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    ocelot->npi_inj_prefix);

	/* Disable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
}

static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
{
	/* Restore hardware defaults */
	int unused_port = ocelot->num_phys_ports + 2;

	ocelot->npi = -1;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
		     QSYS_EXT_CPU_CFG);

	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);

	/* Enable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
}

static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	unsigned long cpu_flood;

	felix_npi_port_init(ocelot, cpu);

	/* Include the CPU port module (and indirectly, the NPI port)
	 * in the forwarding mask for unknown unicast - the hardware
	 * default value for ANA_FLOODING_FLD_UNICAST excludes
	 * BIT(ocelot->num_phys_ports), and so does ocelot_init,
	 * since Ocelot relies on whitelisting MAC addresses towards
	 * PGID_CPU.
	 * We do this because DSA does not yet perform RX filtering,
	 * and the NPI port does not perform source address learning,
	 * so traffic sent to Linux is effectively unknown from the
	 * switch's perspective.
	 */
	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC);
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC);

	return 0;
}

static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;

	felix_npi_port_deinit(ocelot, cpu);
}

static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu,
				  enum dsa_tag_protocol proto)
{
	int err;

	switch (proto) {
	case DSA_TAG_PROTO_SEVILLE:
	case DSA_TAG_PROTO_OCELOT:
		err = felix_setup_tag_npi(ds, cpu);
		break;
	case DSA_TAG_PROTO_OCELOT_8021Q:
		err = felix_setup_tag_8021q(ds, cpu);
		break;
	default:
		err = -EPROTONOSUPPORT;
	}

	return err;
}

static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu,
				   enum dsa_tag_protocol proto)
{
	switch (proto) {
	case DSA_TAG_PROTO_SEVILLE:
	case DSA_TAG_PROTO_OCELOT:
		felix_teardown_tag_npi(ds, cpu);
		break;
	case DSA_TAG_PROTO_OCELOT_8021Q:
		felix_teardown_tag_8021q(ds, cpu);
		break;
	default:
		break;
	}
}

/* This always leaves the switch in a consistent state, because although the
 * tag_8021q setup can fail, the NPI setup can't. So either the change is made,
 * or the restoration is guaranteed to work.
 */
static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
				     enum dsa_tag_protocol proto)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	enum dsa_tag_protocol old_proto = felix->tag_proto;
	int err;

	if (proto != DSA_TAG_PROTO_SEVILLE &&
	    proto != DSA_TAG_PROTO_OCELOT &&
	    proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return -EPROTONOSUPPORT;

	felix_del_tag_protocol(ds, cpu, old_proto);

	err = felix_set_tag_protocol(ds, cpu, proto);
	if (err) {
		felix_set_tag_protocol(ds, cpu, old_proto);
		return err;
	}

	felix->tag_proto = proto;

	return 0;
}
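/* For reference, the active protocol can normally be switched from user space
 * through DSA's standard sysfs attribute on the DSA master, with the
 * interfaces down. Assuming the master is called eno2 (an assumption; typical
 * for the LS1028A felix switch), something like:
 *
 *   ip link set eno2 down
 *   echo ocelot-8021q > /sys/class/net/eno2/dsa/tagging
 *   ip link set eno2 up
 *
 * "ocelot" selects the NPI-based tagger and "ocelot-8021q" the tag_8021q one.
 * The interface name and exact preconditions depend on the system; see
 * Documentation/networking/dsa/ for the authoritative description.
 */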
static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
						    int port,
						    enum dsa_tag_protocol mp)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	return felix->tag_proto;
}

static int felix_set_ageing_time(struct dsa_switch *ds,
				 unsigned int ageing_time)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_set_ageing_time(ocelot, ageing_time);

	return 0;
}

static int felix_fdb_dump(struct dsa_switch *ds, int port,
			  dsa_fdb_dump_cb_t *cb, void *data)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_dump(ocelot, port, cb, data);
}

static int felix_fdb_add(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_add(ocelot, port, addr, vid);
}

static int felix_fdb_del(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_del(ocelot, port, addr, vid);
}

static int felix_mdb_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_mdb_add(ocelot, port, mdb);
}

static int felix_mdb_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_mdb_del(ocelot, port, mdb);
}

static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_bridge_stp_state_set(ocelot, port, state);
}

static int felix_pre_bridge_flags(struct dsa_switch *ds, int port,
				  struct switchdev_brport_flags val,
				  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_pre_bridge_flags(ocelot, port, val);
}

static int felix_bridge_flags(struct dsa_switch *ds, int port,
			      struct switchdev_brport_flags val,
			      struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_flags(ocelot, port, val);

	return 0;
}

static int felix_bridge_join(struct dsa_switch *ds, int port,
			     struct net_device *br)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_join(ocelot, port, br);

	return 0;
}

static void felix_bridge_leave(struct dsa_switch *ds, int port,
			       struct net_device *br)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_leave(ocelot, port, br);
}

static int felix_lag_join(struct dsa_switch *ds, int port,
			  struct net_device *bond,
			  struct netdev_lag_upper_info *info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_lag_join(ocelot, port, bond, info);
}

static int felix_lag_leave(struct dsa_switch *ds, int port,
			   struct net_device *bond)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_lag_leave(ocelot, port, bond);

	return 0;
}

static int felix_lag_change(struct dsa_switch *ds, int port)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct ocelot *ocelot = ds->priv;

	ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled);

	return 0;
}

static int felix_vlan_prepare(struct dsa_switch *ds, int port,
			      const struct switchdev_obj_port_vlan *vlan,
			      struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	u16 flags = vlan->flags;

	/* Ocelot switches copy frames as-is to the CPU, so the flags:
	 * egress-untagged or not, pvid or not, make no difference. This
	 * behavior is already better than what DSA just tries to approximate
	 * when it installs the VLAN with the same flags on the CPU port.
	 * Just accept any configuration, and don't let ocelot deny installing
	 * multiple native VLANs on the NPI port, because the switch doesn't
	 * look at the port tag settings towards the NPI interface anyway.
	 */
	if (port == ocelot->npi)
		return 0;

	return ocelot_vlan_prepare(ocelot, port, vlan->vid,
				   flags & BRIDGE_VLAN_INFO_PVID,
				   flags & BRIDGE_VLAN_INFO_UNTAGGED,
				   extack);
}

static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
				struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
}

static int felix_vlan_add(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	u16 flags = vlan->flags;
	int err;

	err = felix_vlan_prepare(ds, port, vlan, extack);
	if (err)
		return err;

	return ocelot_vlan_add(ocelot, port, vlan->vid,
			       flags & BRIDGE_VLAN_INFO_PVID,
			       flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

static int felix_vlan_del(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_vlan_del(ocelot, port, vlan->vid);
}

static void felix_phylink_validate(struct dsa_switch *ds, int port,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->phylink_validate)
		felix->info->phylink_validate(ocelot, port, supported, state);
}

static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
				     unsigned int link_an_mode,
				     const struct phylink_link_state *state)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (felix->pcs[port])
		phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs);
}

static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
					unsigned int link_an_mode,
					phy_interface_t interface)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
				     FELIX_MAC_QUIRKS);
}

static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int link_an_mode,
				      phy_interface_t interface,
				      struct phy_device *phydev,
				      int speed, int duplex,
				      bool tx_pause, bool rx_pause)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
				   interface, speed, duplex, tx_pause, rx_pause,
				   FELIX_MAC_QUIRKS);

	if (felix->info->port_sched_speed_set)
		felix->info->port_sched_speed_set(ocelot, port, speed);
}

static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
	int i;

	ocelot_rmw_gix(ocelot,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG,
		       port);

	for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
		ocelot_rmw_ix(ocelot,
			      (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
			      ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M,
			      ANA_PORT_PCP_DEI_MAP,
			      port, i);
	}
}
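/* Reading of the loop above: the PCP_DEI_MAP table has OCELOT_NUM_TC * 2 = 16
 * entries, one per {DEI, PCP} combination, and entry i is programmed with QoS
 * class i. Since the QoS class field is narrower than the 4-bit index, this
 * appears to map each PCP value to the equally-numbered traffic class for
 * both DEI values, with the DP (drop precedence) bit following DEI for
 * entries 8-15. This is an interpretation of the register layout, not a quote
 * from the datasheet.
 */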
static void felix_get_strings(struct dsa_switch *ds, int port,
			      u32 stringset, u8 *data)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_strings(ocelot, port, stringset, data);
}

static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_get_ethtool_stats(ocelot, port, data);
}

static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_sset_count(ocelot, port, sset);
}

static int felix_get_ts_info(struct dsa_switch *ds, int port,
			     struct ethtool_ts_info *info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_ts_info(ocelot, port, info);
}

static int felix_parse_ports_node(struct felix *felix,
				  struct device_node *ports_node,
				  phy_interface_t *port_phy_modes)
{
	struct ocelot *ocelot = &felix->ocelot;
	struct device *dev = felix->ocelot.dev;
	struct device_node *child;

	for_each_available_child_of_node(ports_node, child) {
		phy_interface_t phy_mode;
		u32 port;
		int err;

		/* Get switch port number from DT */
		if (of_property_read_u32(child, "reg", &port) < 0) {
			dev_err(dev, "Port number not defined in device tree "
				"(property \"reg\")\n");
			of_node_put(child);
			return -ENODEV;
		}

		/* Get PHY mode from DT */
		err = of_get_phy_mode(child, &phy_mode);
		if (err) {
			dev_err(dev, "Failed to read phy-mode or "
				"phy-interface-type property for port %d\n",
				port);
			of_node_put(child);
			return -ENODEV;
		}

		err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode);
		if (err < 0) {
			dev_err(dev, "Unsupported PHY mode %s on port %d\n",
				phy_modes(phy_mode), port);
			of_node_put(child);
			return err;
		}

		port_phy_modes[port] = phy_mode;
	}

	return 0;
}

static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
{
	struct device *dev = felix->ocelot.dev;
	struct device_node *switch_node;
	struct device_node *ports_node;
	int err;

	switch_node = dev->of_node;

	ports_node = of_get_child_by_name(switch_node, "ports");
	if (!ports_node) {
		dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
		return -ENODEV;
	}

	err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
	of_node_put(ports_node);

	return err;
}

static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
	struct ocelot *ocelot = &felix->ocelot;
	phy_interface_t *port_phy_modes;
	struct resource res;
	int port, i, err;

	ocelot->num_phys_ports = num_phys_ports;
	ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
				     sizeof(struct ocelot_port *), GFP_KERNEL);
	if (!ocelot->ports)
		return -ENOMEM;

	ocelot->map = felix->info->map;
	ocelot->stats_layout = felix->info->stats_layout;
	ocelot->num_stats = felix->info->num_stats;
	ocelot->num_mact_rows = felix->info->num_mact_rows;
	ocelot->vcap = felix->info->vcap;
	ocelot->ops = felix->info->ops;
	ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->devlink = felix->ds->devlink;

	port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
				 GFP_KERNEL);
	if (!port_phy_modes)
		return -ENOMEM;

	err = felix_parse_dt(felix, port_phy_modes);
	if (err) {
		kfree(port_phy_modes);
		return err;
	}

	for (i = 0; i < TARGET_MAX; i++) {
		struct regmap *target;

		if (!felix->info->target_io_res[i].name)
			continue;
		memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
		res.flags = IORESOURCE_MEM;
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		target = ocelot_regmap_init(ocelot, &res);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map device memory space\n");
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot->targets[i] = target;
	}

	err = ocelot_regfields_init(ocelot, felix->info->regfields);
	if (err) {
		dev_err(ocelot->dev, "failed to init reg fields map\n");
		kfree(port_phy_modes);
		return err;
	}

	for (port = 0; port < num_phys_ports; port++) {
		struct ocelot_port *ocelot_port;
		struct regmap *target;

		ocelot_port = devm_kzalloc(ocelot->dev,
					   sizeof(struct ocelot_port),
					   GFP_KERNEL);
		if (!ocelot_port) {
			dev_err(ocelot->dev,
				"failed to allocate port memory\n");
			kfree(port_phy_modes);
			return -ENOMEM;
		}

		memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
		res.flags = IORESOURCE_MEM;
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		target = ocelot_regmap_init(ocelot, &res);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map memory space for port %d\n",
				port);
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot_port->phy_mode = port_phy_modes[port];
		ocelot_port->ocelot = ocelot;
		ocelot_port->target = target;
		ocelot->ports[port] = ocelot_port;
	}

	kfree(port_phy_modes);

	if (felix->info->mdio_bus_alloc) {
		err = felix->info->mdio_bus_alloc(ocelot);
		if (err < 0)
			return err;
	}

	return 0;
}

static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
					   struct sk_buff *skb)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
	struct sk_buff *skb_match = NULL, *skb_tmp;
	unsigned long flags;

	if (!clone)
		return;

	spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);

	skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
		if (skb != clone)
			continue;
		__skb_unlink(skb, &ocelot_port->tx_skbs);
		skb_match = skb;
		break;
	}

	spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);

	WARN_ONCE(!skb_match,
		  "Could not find skb clone in TX timestamping list\n");
}

#define work_to_xmit_work(w) \
		container_of((w), struct felix_deferred_xmit_work, work)

static void felix_port_deferred_xmit(struct kthread_work *work)
{
	struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
	struct dsa_switch *ds = xmit_work->dp->ds;
	struct sk_buff *skb = xmit_work->skb;
	u32 rew_op = ocelot_ptp_rew_op(skb);
	struct ocelot *ocelot = ds->priv;
	int port = xmit_work->dp->index;
	int retries = 10;

	do {
		if (ocelot_can_inject(ocelot, 0))
			break;

		cpu_relax();
	} while (--retries);

	if (!retries) {
		dev_err(ocelot->dev, "port %d failed to inject skb\n",
			port);
		ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
		kfree_skb(skb);
		/* The work item is owned by us in both paths, free it here
		 * too so the error path does not leak it.
		 */
		kfree(xmit_work);
		return;
	}

	ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);

	consume_skb(skb);
	kfree(xmit_work);
}
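/* Background on the worker above: in ocelot-8021q mode, frames that need a
 * rewriter operation (in practice, PTP frames requesting hardware TX
 * timestamping) cannot simply be sent through the DSA master, so the tagger
 * hands them to this kthread worker instead, which injects them into the
 * switch through the CPU port module registers, retrying briefly if the
 * injection FIFO has no room. This summary reflects the tag_8021q data path
 * as understood from this file and the ocelot-8021q tagger, not a hardware
 * manual.
 */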
static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct felix_port *felix_port;

	if (!dsa_port_is_user(dp))
		return 0;

	felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL);
	if (!felix_port)
		return -ENOMEM;

	felix_port->xmit_worker = felix->xmit_worker;
	felix_port->xmit_work_fn = felix_port_deferred_xmit;

	dp->priv = felix_port;

	return 0;
}

static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct felix_port *felix_port = dp->priv;

	if (!felix_port)
		return;

	dp->priv = NULL;
	kfree(felix_port);
}

/* Hardware initialization done here so that we can allocate structures with
 * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
 * us to allocate structures twice (leak memory) and map PCI memory twice
 * (which will not work).
 */
static int felix_setup(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int port, err;

	err = felix_init_structs(felix, ds->num_ports);
	if (err)
		return err;

	err = ocelot_init(ocelot);
	if (err)
		goto out_mdiobus_free;

	if (ocelot->ptp) {
		err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
		if (err) {
			dev_err(ocelot->dev,
				"Timestamp initialization failed\n");
			ocelot->ptp = 0;
		}
	}

	felix->xmit_worker = kthread_create_worker(0, "felix_xmit");
	if (IS_ERR(felix->xmit_worker)) {
		err = PTR_ERR(felix->xmit_worker);
		goto out_deinit_timestamp;
	}

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		ocelot_init_port(ocelot, port);

		/* Set the default QoS Classification based on PCP and DEI
		 * bits of vlan tag.
		 */
		felix_port_qos_map_init(ocelot, port);

		err = felix_port_setup_tagger_data(ds, port);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to set up tagger data: %pe\n",
				port, ERR_PTR(err));
			goto out_deinit_ports;
		}
	}

	err = ocelot_devlink_sb_register(ocelot);
	if (err)
		goto out_deinit_ports;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		/* The initial tag protocol is NPI which always returns 0, so
		 * there's no real point in checking for errors.
		 */
		felix_set_tag_protocol(ds, port, felix->tag_proto);
		break;
	}

	ds->mtu_enforcement_ingress = true;
	ds->assisted_learning_on_cpu_port = true;

	return 0;

out_deinit_ports:
	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		felix_port_teardown_tagger_data(ds, port);
		ocelot_deinit_port(ocelot, port);
	}

	kthread_destroy_worker(felix->xmit_worker);

out_deinit_timestamp:
	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

out_mdiobus_free:
	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);

	return err;
}

static void felix_teardown(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		felix_del_tag_protocol(ds, port, felix->tag_proto);
		break;
	}

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		felix_port_teardown_tagger_data(ds, port);
		ocelot_deinit_port(ocelot, port);
	}

	kthread_destroy_worker(felix->xmit_worker);

	ocelot_devlink_sb_unregister(ocelot);
	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);
}

static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_hwstamp_get(ocelot, port, ifr);
}

static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_hwstamp_set(ocelot, port, ifr);
}

static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	int err = 0, grp = 0;

	if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return false;

	if (!felix->info->quirk_no_xtr_irq)
		return false;

	if (ptp_type == PTP_CLASS_NONE)
		return false;

	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
		struct sk_buff *skb;
		unsigned int type;

		err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
		if (err)
			goto out;

		/* We trap to the CPU port module all PTP frames, but
		 * felix_rxtstamp() only gets called for event frames.
		 * So we need to avoid sending duplicate general
		 * message frames by running a second BPF classifier
		 * here and dropping those.
		 */
		__skb_push(skb, ETH_HLEN);

		type = ptp_classify_raw(skb);

		__skb_pull(skb, ETH_HLEN);

		if (type == PTP_CLASS_NONE) {
			kfree_skb(skb);
			continue;
		}

		netif_rx(skb);
	}

out:
	if (err < 0)
		ocelot_drain_cpu_queue(ocelot, 0);

	return true;
}

static bool felix_rxtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb, unsigned int type)
{
	u8 *extraction = skb->data - ETH_HLEN - OCELOT_TAG_LEN;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ocelot *ocelot = ds->priv;
	u32 tstamp_lo, tstamp_hi;
	struct timespec64 ts;
	u64 tstamp, val;

	/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
	 * for RX timestamping. Then free it, and poll for its copy through
	 * MMIO in the CPU port module, and inject that into the stack from
	 * ocelot_xtr_poll().
	 */
	if (felix_check_xtr_pkt(ocelot, type)) {
		kfree_skb(skb);
		return true;
	}

	ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
	tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);

	ocelot_xfh_get_rew_val(extraction, &val);
	tstamp_lo = (u32)val;

	tstamp_hi = tstamp >> 32;
	if ((tstamp & 0xffffffff) < tstamp_lo)
		tstamp_hi--;

	tstamp = ((u64)tstamp_hi << 32) | tstamp_lo;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = tstamp;
	return false;
}
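/* Worked example of the reconstruction above: the extraction header only
 * carries the low 32 bits of the RX timestamp (in nanoseconds), so the full
 * value is rebuilt from the current PTP time. If ocelot_ptp_gettime64() reads
 * 0x00000005_00000010 ns and the captured low word is 0xfffffff0, the frame
 * must have been stamped before the low word wrapped, so tstamp_hi is
 * decremented and the result is 0x00000004_fffffff0 ns. The numbers are made
 * up purely for illustration.
 */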
static void felix_txtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb)
{
	struct ocelot *ocelot = ds->priv;
	struct sk_buff *clone = NULL;

	if (!ocelot->ptp)
		return;

	if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
		dev_err_ratelimited(ds->dev,
				    "port %d delivering skb without TX timestamp\n",
				    port);
		return;
	}

	if (clone)
		OCELOT_SKB_CB(skb)->clone = clone;
}

static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_set_maxlen(ocelot, port, new_mtu);

	return 0;
}

static int felix_get_max_mtu(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_max_mtu(ocelot, port);
}

static int felix_cls_flower_add(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
}

static int felix_cls_flower_del(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_destroy(ocelot, port, cls, ingress);
}

static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
				  struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_stats(ocelot, port, cls, ingress);
}

static int felix_port_policer_add(struct dsa_switch *ds, int port,
				  struct dsa_mall_policer_tc_entry *policer)
{
	struct ocelot *ocelot = ds->priv;
	struct ocelot_policer pol = {
		.rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
		.burst = policer->burst,
	};

	return ocelot_port_policer_add(ocelot, port, &pol);
}

static void felix_port_policer_del(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_policer_del(ocelot, port);
}

static int felix_port_setup_tc(struct dsa_switch *ds, int port,
			       enum tc_setup_type type,
			       void *type_data)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->port_setup_tc)
		return felix->info->port_setup_tc(ds, port, type, type_data);
	else
		return -EOPNOTSUPP;
}

static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index,
			     struct devlink_sb_pool_info *pool_info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
}

static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index, u32 size,
			     enum devlink_sb_threshold_type threshold_type,
			     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
				  threshold_type, extack);
}

static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
				       p_threshold);
}

static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 threshold, struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
				       threshold, extack);
}

static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 *p_pool_index, u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
					  pool_type, p_pool_index,
					  p_threshold);
}

static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 pool_index, u32 threshold,
				     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
					  pool_type, pool_index, threshold,
					  extack);
}

static int felix_sb_occ_snapshot(struct dsa_switch *ds,
				 unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_snapshot(ocelot, sb_index);
}

static int felix_sb_occ_max_clear(struct dsa_switch *ds,
				  unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_max_clear(ocelot, sb_index);
}

static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
				      unsigned int sb_index, u16 pool_index,
				      u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
					   p_cur, p_max);
}
static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
					 unsigned int sb_index, u16 tc_index,
					 enum devlink_sb_pool_type pool_type,
					 u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index,
					      pool_type, p_cur, p_max);
}

static int felix_mrp_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_add(ocelot, port, mrp);
}

static int felix_mrp_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_del(ocelot, port, mrp);
}

static int
felix_mrp_add_ring_role(struct dsa_switch *ds, int port,
			const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_add_ring_role(ocelot, port, mrp);
}

static int
felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
			const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}

const struct dsa_switch_ops felix_switch_ops = {
	.get_tag_protocol		= felix_get_tag_protocol,
	.change_tag_protocol		= felix_change_tag_protocol,
	.setup				= felix_setup,
	.teardown			= felix_teardown,
	.set_ageing_time		= felix_set_ageing_time,
	.get_strings			= felix_get_strings,
	.get_ethtool_stats		= felix_get_ethtool_stats,
	.get_sset_count			= felix_get_sset_count,
	.get_ts_info			= felix_get_ts_info,
	.phylink_validate		= felix_phylink_validate,
	.phylink_mac_config		= felix_phylink_mac_config,
	.phylink_mac_link_down		= felix_phylink_mac_link_down,
	.phylink_mac_link_up		= felix_phylink_mac_link_up,
	.port_fdb_dump			= felix_fdb_dump,
	.port_fdb_add			= felix_fdb_add,
	.port_fdb_del			= felix_fdb_del,
	.port_mdb_add			= felix_mdb_add,
	.port_mdb_del			= felix_mdb_del,
	.port_pre_bridge_flags		= felix_pre_bridge_flags,
	.port_bridge_flags		= felix_bridge_flags,
	.port_bridge_join		= felix_bridge_join,
	.port_bridge_leave		= felix_bridge_leave,
	.port_lag_join			= felix_lag_join,
	.port_lag_leave			= felix_lag_leave,
	.port_lag_change		= felix_lag_change,
	.port_stp_state_set		= felix_bridge_stp_state_set,
	.port_vlan_filtering		= felix_vlan_filtering,
	.port_vlan_add			= felix_vlan_add,
	.port_vlan_del			= felix_vlan_del,
	.port_hwtstamp_get		= felix_hwtstamp_get,
	.port_hwtstamp_set		= felix_hwtstamp_set,
	.port_rxtstamp			= felix_rxtstamp,
	.port_txtstamp			= felix_txtstamp,
	.port_change_mtu		= felix_change_mtu,
	.port_max_mtu			= felix_get_max_mtu,
	.port_policer_add		= felix_port_policer_add,
	.port_policer_del		= felix_port_policer_del,
	.cls_flower_add			= felix_cls_flower_add,
	.cls_flower_del			= felix_cls_flower_del,
	.cls_flower_stats		= felix_cls_flower_stats,
	.port_setup_tc			= felix_port_setup_tc,
	.devlink_sb_pool_get		= felix_sb_pool_get,
	.devlink_sb_pool_set		= felix_sb_pool_set,
	.devlink_sb_port_pool_get	= felix_sb_port_pool_get,
	.devlink_sb_port_pool_set	= felix_sb_port_pool_set,
	.devlink_sb_tc_pool_bind_get	= felix_sb_tc_pool_bind_get,
	.devlink_sb_tc_pool_bind_set	= felix_sb_tc_pool_bind_set,
	.devlink_sb_occ_snapshot	= felix_sb_occ_snapshot,
	.devlink_sb_occ_max_clear	= felix_sb_occ_max_clear,
	.devlink_sb_occ_port_pool_get	= felix_sb_occ_port_pool_get,
	.devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get,
	.port_mrp_add			= felix_mrp_add,
	.port_mrp_del			= felix_mrp_del,
	.port_mrp_add_ring_role		= felix_mrp_add_ring_role,
	.port_mrp_del_ring_role		= felix_mrp_del_ring_role,
	.tag_8021q_vlan_add		= felix_tag_8021q_vlan_add,
	.tag_8021q_vlan_del		= felix_tag_8021q_vlan_del,
};

struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_switch *ds = felix->ds;

	if (!dsa_is_user_port(ds, port))
		return NULL;

	return dsa_to_port(ds, port)->slave;
}

int felix_netdev_to_port(struct net_device *dev)
{
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(dev);
	if (IS_ERR(dp))
		return -EINVAL;

	return dp->index;
}