// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019-2021 NXP
 *
 * This is an umbrella module for all network switches that are
 * register-compatible with Ocelot and that perform I/O to their host CPU
 * through an NPI (Node Processor Interface) Ethernet port.
 */
#include <uapi/linux/if_bridge.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot.h>
#include <linux/dsa/8021q.h>
#include <linux/dsa/ocelot.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"

static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
				      bool pvid, bool untagged)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int key_length, upstream, err;

	/* We don't need to install the rxvlan into the other ports' filtering
	 * tables, because we're just pushing the rxvlan when sending towards
	 * the CPU
	 */
	if (!pvid)
		return 0;

	key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
	upstream = dsa_upstream_port(ds, port);

	outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
				     GFP_KERNEL);
	if (!outer_tagging_rule)
		return -ENOMEM;

	outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	outer_tagging_rule->prio = 1;
	outer_tagging_rule->id.cookie = port;
	outer_tagging_rule->id.tc_offload = false;
	outer_tagging_rule->block_id = VCAP_ES0;
	outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	outer_tagging_rule->lookup = 0;
	outer_tagging_rule->ingress_port.value = port;
	outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->egress_port.value = upstream;
	outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG;
	outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
	outer_tagging_rule->action.tag_a_vid_sel = 1;
	outer_tagging_rule->action.vid_a_val = vid;

	err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
	if (err)
		kfree(outer_tagging_rule);

	return err;
}
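
/* The rxvlan rule above and the txvlan rules below implement the tag_8021q
 * header manipulation entirely in VCAP hardware. Roughly:
 *
 * - rx (user port -> CPU): an ES0 rule matching frames received on the user
 *   port and egressing the upstream port pushes the port's rx VLAN, with an
 *   802.1AD TPID, so the tagger on the DSA master can tell which user port
 *   the frame came from.
 * - tx (CPU -> user port): an IS1 rule on the upstream port matches the tx
 *   VLAN, pops it and sets the PAG, and an IS2 rule chained through that PAG
 *   redirects the frame to the user port encoded in that VLAN.
 */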

static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
				      bool pvid, bool untagged)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int upstream, err;

	/* tag_8021q.c assumes we are implementing this via port VLAN
	 * membership, which we aren't. So we don't need to add any VCAP filter
	 * for the CPU port.
	 */
	if (ocelot->ports[port]->is_dsa_8021q_cpu)
		return 0;

	untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!untagging_rule)
		return -ENOMEM;

	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!redirect_rule) {
		kfree(untagging_rule);
		return -ENOMEM;
	}

	upstream = dsa_upstream_port(ds, port);

	untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	untagging_rule->ingress_port_mask = BIT(upstream);
	untagging_rule->vlan.vid.value = vid;
	untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
	untagging_rule->prio = 1;
	untagging_rule->id.cookie = port;
	untagging_rule->id.tc_offload = false;
	untagging_rule->block_id = VCAP_IS1;
	untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	untagging_rule->lookup = 0;
	untagging_rule->action.vlan_pop_cnt_ena = true;
	untagging_rule->action.vlan_pop_cnt = 1;
	untagging_rule->action.pag_override_mask = 0xff;
	untagging_rule->action.pag_val = port;

	err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL);
	if (err) {
		kfree(untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
	redirect_rule->ingress_port_mask = BIT(upstream);
	redirect_rule->pag = port;
	redirect_rule->prio = 1;
	redirect_rule->id.cookie = port;
	redirect_rule->id.tc_offload = false;
	redirect_rule->block_id = VCAP_IS2;
	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	redirect_rule->lookup = 0;
	redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
	redirect_rule->action.port_mask = BIT(port);

	err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
	if (err) {
		ocelot_vcap_filter_del(ocelot, untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	return 0;
}

static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
				    u16 flags)
{
	bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
	struct ocelot *ocelot = ds->priv;

	if (vid_is_dsa_8021q_rxvlan(vid))
		return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot),
						  port, vid, pvid, untagged);

	if (vid_is_dsa_8021q_txvlan(vid))
		return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot),
						  port, vid, pvid, untagged);

	return 0;
}

static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot_vcap_block *block_vcap_es0;
	struct ocelot *ocelot = &felix->ocelot;

	block_vcap_es0 = &ocelot->block[VCAP_ES0];

	outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
								 port, false);
	/* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid
	 * installing outer tagging ES0 rules where they weren't needed.
	 * But in rxvlan_del, the API doesn't give us the "flags" anymore,
	 * so that forces us to be slightly sloppy here, and just assume that
	 * if we didn't find an outer_tagging_rule it means that there was
	 * none in the first place, i.e. rxvlan_del is called on a non-pvid
	 * port. This is most probably true though.
	 */
	if (!outer_tagging_rule)
		return 0;

	return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
}

static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	struct ocelot_vcap_block *block_vcap_is1;
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot *ocelot = &felix->ocelot;
	int err;

	if (ocelot->ports[port]->is_dsa_8021q_cpu)
		return 0;

	block_vcap_is1 = &ocelot->block[VCAP_IS1];
	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
							     port, false);
	if (!untagging_rule)
		return 0;

	err = ocelot_vcap_filter_del(ocelot, untagging_rule);
	if (err)
		return err;

	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
							    port, false);
	if (!redirect_rule)
		return 0;

	return ocelot_vcap_filter_del(ocelot, redirect_rule);
}

static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	if (vid_is_dsa_8021q_rxvlan(vid))
		return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot),
						  port, vid);

	if (vid_is_dsa_8021q_txvlan(vid))
		return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot),
						  port, vid);

	return 0;
}

/* Alternatively to using the NPI functionality, that same hardware MAC
 * connected internally to the enetc or fman DSA master can be configured to
 * use the software-defined tag_8021q frame format. As far as the hardware is
 * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
 * module are now disconnected from it, but can still be accessed through
 * register-based MMIO.
 */
static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
{
	mutex_lock(&ocelot->fwd_domain_lock);

	ocelot->ports[port]->is_dsa_8021q_cpu = true;
	ocelot->npi = -1;

	/* Overwrite PGID_CPU with the non-tagging port */
	ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);

	ocelot_apply_bridge_fwd_mask(ocelot, true);

	mutex_unlock(&ocelot->fwd_domain_lock);
}

static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
{
	mutex_lock(&ocelot->fwd_domain_lock);

	ocelot->ports[port]->is_dsa_8021q_cpu = false;

	/* Restore PGID_CPU */
	ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
			 PGID_CPU);

	ocelot_apply_bridge_fwd_mask(ocelot, true);

	mutex_unlock(&ocelot->fwd_domain_lock);
}

/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
 * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
 * tag_8021q CPU port.
 */
static int felix_setup_mmio_filtering(struct felix *felix)
{
	unsigned long user_ports = dsa_user_ports(felix->ds);
	struct ocelot_vcap_filter *redirect_rule;
	struct ocelot_vcap_filter *tagging_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int cpu = -1, port, ret;

	tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!tagging_rule)
		return -ENOMEM;

	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!redirect_rule) {
		kfree(tagging_rule);
		return -ENOMEM;
	}

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_cpu_port(ds, port)) {
			cpu = port;
			break;
		}
	}

	if (cpu < 0) {
		kfree(tagging_rule);
		kfree(redirect_rule);
		return -EINVAL;
	}

	tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
	*(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
	*(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
	tagging_rule->ingress_port_mask = user_ports;
	tagging_rule->prio = 1;
	tagging_rule->id.cookie = ocelot->num_phys_ports;
	tagging_rule->id.tc_offload = false;
	tagging_rule->block_id = VCAP_IS1;
	tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	tagging_rule->lookup = 0;
	tagging_rule->action.pag_override_mask = 0xff;
	tagging_rule->action.pag_val = ocelot->num_phys_ports;

	ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
	if (ret) {
		kfree(tagging_rule);
		kfree(redirect_rule);
		return ret;
	}

	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
	redirect_rule->ingress_port_mask = user_ports;
	redirect_rule->pag = ocelot->num_phys_ports;
	redirect_rule->prio = 1;
	redirect_rule->id.cookie = ocelot->num_phys_ports;
	redirect_rule->id.tc_offload = false;
	redirect_rule->block_id = VCAP_IS2;
	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	redirect_rule->lookup = 0;
	redirect_rule->action.cpu_copy_ena = true;
	if (felix->info->quirk_no_xtr_irq) {
		/* Redirect to the tag_8021q CPU but also copy PTP packets to
		 * the CPU port module
		 */
		redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
		redirect_rule->action.port_mask = BIT(cpu);
	} else {
		/* Trap PTP packets only to the CPU port module (which is
		 * redirected to the NPI port)
		 */
		redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
		redirect_rule->action.port_mask = 0;
	}

	ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
	if (ret) {
		ocelot_vcap_filter_del(ocelot, tagging_rule);
		kfree(redirect_rule);
		return ret;
	}

	/* The ownership of the CPU port module's queues might have just been
	 * transferred to the tag_8021q tagger from the NPI-based tagger.
	 * So there might still be all sorts of crap in the queues. On the
	 * other hand, the MMIO-based matching of PTP frames is very brittle,
	 * so we need to be careful that there are no extra frames to be
	 * dequeued over MMIO, since we would never know to discard them.
	 */
	ocelot_drain_cpu_queue(ocelot, 0);

	return 0;
}

static int felix_teardown_mmio_filtering(struct felix *felix)
{
	struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
	struct ocelot_vcap_block *block_vcap_is1;
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot *ocelot = &felix->ocelot;
	int err;

	block_vcap_is1 = &ocelot->block[VCAP_IS1];
	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
							   ocelot->num_phys_ports,
							   false);
	if (!tagging_rule)
		return -ENOENT;

	err = ocelot_vcap_filter_del(ocelot, tagging_rule);
	if (err)
		return err;

	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
							    ocelot->num_phys_ports,
							    false);
	if (!redirect_rule)
		return -ENOENT;

	return ocelot_vcap_filter_del(ocelot, redirect_rule);
}

static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	unsigned long cpu_flood;
	int port, err;

	felix_8021q_cpu_port_init(ocelot, cpu);

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		/* This overwrites ocelot_init():
		 * Do not forward BPDU frames to the CPU port module,
		 * for 2 reasons:
		 * - When these packets are injected from the tag_8021q
		 *   CPU port, we want them to go out, not loop back
		 *   into the system.
		 * - STP traffic ingressing on a user port should go to
		 *   the tag_8021q CPU port, not to the hardware CPU
		 *   port module.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
				 ANA_PORT_CPU_FWD_BPDU_CFG, port);
	}

	/* In tag_8021q mode, the CPU port module is unused, except for PTP
	 * frames. So we want to disable flooding of any kind to the CPU port
	 * module, since packets going there will end in a black hole.
	 */
	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);

	err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
	if (err)
		return err;

	err = felix_setup_mmio_filtering(felix);
	if (err)
		goto out_tag_8021q_unregister;

	return 0;

out_tag_8021q_unregister:
	dsa_tag_8021q_unregister(ds);
	return err;
}

static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int err, port;

	err = felix_teardown_mmio_filtering(felix);
	if (err)
		dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
			err);

	dsa_tag_8021q_unregister(ds);

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		/* Restore the logic from ocelot_init:
		 * do not forward BPDU frames to the front ports.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
				 ANA_PORT_CPU_FWD_BPDU_CFG,
				 port);
	}

	felix_8021q_cpu_port_deinit(ocelot, cpu);
}
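
/* Whether the switch runs in tag_8021q mode (above) or NPI mode (below) is
 * chosen at runtime through felix_change_tag_protocol(), which DSA exposes
 * via the master interface's "dsa/tagging" sysfs attribute. A usage sketch
 * (the interface name is board-specific):
 *
 *   echo ocelot-8021q > /sys/class/net/eno2/dsa/tagging
 */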

/* The CPU port module is connected to the Node Processor Interface (NPI). This
 * is the mode through which frames can be injected from and extracted to an
 * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
 * running Linux, and this forms a DSA setup together with the enetc or fman
 * DSA master.
 */
static void felix_npi_port_init(struct ocelot *ocelot, int port)
{
	ocelot->npi = port;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
		     QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
		     QSYS_EXT_CPU_CFG);

	/* NPI port Injection/Extraction configuration */
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    ocelot->npi_xtr_prefix);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    ocelot->npi_inj_prefix);

	/* Disable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
}

static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
{
	/* Restore hardware defaults */
	int unused_port = ocelot->num_phys_ports + 2;

	ocelot->npi = -1;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
		     QSYS_EXT_CPU_CFG);

	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);

	/* Enable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
}

static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	unsigned long cpu_flood;

	felix_npi_port_init(ocelot, cpu);

	/* Include the CPU port module (and indirectly, the NPI port)
	 * in the forwarding mask for unknown unicast - the hardware
	 * default value for ANA_FLOODING_FLD_UNICAST excludes
	 * BIT(ocelot->num_phys_ports), and so does ocelot_init,
	 * since Ocelot relies on whitelisting MAC addresses towards
	 * PGID_CPU.
	 * We do this because DSA does not yet perform RX filtering,
	 * and the NPI port does not perform source address learning,
	 * so traffic sent to Linux is effectively unknown from the
	 * switch's perspective.
	 */
	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC);
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC);

	return 0;
}

static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;

	felix_npi_port_deinit(ocelot, cpu);
}

static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu,
				  enum dsa_tag_protocol proto)
{
	int err;

	switch (proto) {
	case DSA_TAG_PROTO_SEVILLE:
	case DSA_TAG_PROTO_OCELOT:
		err = felix_setup_tag_npi(ds, cpu);
		break;
	case DSA_TAG_PROTO_OCELOT_8021Q:
		err = felix_setup_tag_8021q(ds, cpu);
		break;
	default:
		err = -EPROTONOSUPPORT;
	}

	return err;
}

static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu,
				   enum dsa_tag_protocol proto)
{
	switch (proto) {
	case DSA_TAG_PROTO_SEVILLE:
	case DSA_TAG_PROTO_OCELOT:
		felix_teardown_tag_npi(ds, cpu);
		break;
	case DSA_TAG_PROTO_OCELOT_8021Q:
		felix_teardown_tag_8021q(ds, cpu);
		break;
	default:
		break;
	}
}

/* This always leaves the switch in a consistent state, because although the
 * tag_8021q setup can fail, the NPI setup can't. So either the change is made,
 * or the restoration is guaranteed to work.
 */
static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
				     enum dsa_tag_protocol proto)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	enum dsa_tag_protocol old_proto = felix->tag_proto;
	int err;

	if (proto != DSA_TAG_PROTO_SEVILLE &&
	    proto != DSA_TAG_PROTO_OCELOT &&
	    proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return -EPROTONOSUPPORT;

	felix_del_tag_protocol(ds, cpu, old_proto);

	err = felix_set_tag_protocol(ds, cpu, proto);
	if (err) {
		felix_set_tag_protocol(ds, cpu, old_proto);
		return err;
	}

	felix->tag_proto = proto;

	return 0;
}

static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
						    int port,
						    enum dsa_tag_protocol mp)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	return felix->tag_proto;
}

static int felix_set_ageing_time(struct dsa_switch *ds,
				 unsigned int ageing_time)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_set_ageing_time(ocelot, ageing_time);

	return 0;
}

static void felix_port_fast_age(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;
	int err;

	err = ocelot_mact_flush(ocelot, port);
	if (err)
		dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n",
			port, ERR_PTR(err));
}

static int felix_fdb_dump(struct dsa_switch *ds, int port,
			  dsa_fdb_dump_cb_t *cb, void *data)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_dump(ocelot, port, cb, data);
}

static int felix_fdb_add(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_add(ocelot, port, addr, vid);
}

static int felix_fdb_del(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_del(ocelot, port, addr, vid);
}

static int felix_mdb_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_mdb_add(ocelot, port, mdb);
}

static int felix_mdb_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_mdb_del(ocelot, port, mdb);
}

static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_bridge_stp_state_set(ocelot, port, state);
}

static int felix_pre_bridge_flags(struct dsa_switch *ds, int port,
				  struct switchdev_brport_flags val,
				  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_pre_bridge_flags(ocelot, port, val);
}

static int felix_bridge_flags(struct dsa_switch *ds, int port,
			      struct switchdev_brport_flags val,
			      struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_flags(ocelot, port, val);

	return 0;
}

static int felix_bridge_join(struct dsa_switch *ds, int port,
			     struct dsa_bridge bridge, bool *tx_fwd_offload)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_join(ocelot, port, bridge.dev);

	return 0;
}

static void felix_bridge_leave(struct dsa_switch *ds, int port,
			       struct dsa_bridge bridge)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_leave(ocelot, port, bridge.dev);
}

static int felix_lag_join(struct dsa_switch *ds, int port,
			  struct net_device *bond,
			  struct netdev_lag_upper_info *info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_lag_join(ocelot, port, bond, info);
}

static int felix_lag_leave(struct dsa_switch *ds, int port,
			   struct net_device *bond)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_lag_leave(ocelot, port, bond);

	return 0;
}

static int felix_lag_change(struct dsa_switch *ds, int port)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct ocelot *ocelot = ds->priv;

	ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled);

	return 0;
}

static int felix_vlan_prepare(struct dsa_switch *ds, int port,
			      const struct switchdev_obj_port_vlan *vlan,
			      struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	u16 flags = vlan->flags;

	/* Ocelot switches copy frames as-is to the CPU, so the flags:
	 * egress-untagged or not, pvid or not, make no difference. This
	 * behavior is already better than what DSA just tries to approximate
	 * when it installs the VLAN with the same flags on the CPU port.
	 * Just accept any configuration, and don't let ocelot deny installing
	 * multiple native VLANs on the NPI port, because the switch doesn't
	 * look at the port tag settings towards the NPI interface anyway.
	 */
	if (port == ocelot->npi)
		return 0;

	return ocelot_vlan_prepare(ocelot, port, vlan->vid,
				   flags & BRIDGE_VLAN_INFO_PVID,
				   flags & BRIDGE_VLAN_INFO_UNTAGGED,
				   extack);
}

static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
				struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
}

static int felix_vlan_add(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	u16 flags = vlan->flags;
	int err;

	err = felix_vlan_prepare(ds, port, vlan, extack);
	if (err)
		return err;

	return ocelot_vlan_add(ocelot, port, vlan->vid,
			       flags & BRIDGE_VLAN_INFO_PVID,
			       flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

static int felix_vlan_del(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_vlan_del(ocelot, port, vlan->vid);
}

static void felix_phylink_validate(struct dsa_switch *ds, int port,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->phylink_validate)
		felix->info->phylink_validate(ocelot, port, supported, state);
}

static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
				     unsigned int link_an_mode,
				     const struct phylink_link_state *state)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (felix->pcs && felix->pcs[port])
		phylink_set_pcs(dp->pl, felix->pcs[port]);
}

static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
					unsigned int link_an_mode,
					phy_interface_t interface)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
				     FELIX_MAC_QUIRKS);
}

static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int link_an_mode,
				      phy_interface_t interface,
				      struct phy_device *phydev,
				      int speed, int duplex,
				      bool tx_pause, bool rx_pause)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
				   interface, speed, duplex, tx_pause, rx_pause,
				   FELIX_MAC_QUIRKS);

	if (felix->info->port_sched_speed_set)
		felix->info->port_sched_speed_set(ocelot, port, speed);
}

static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
	int i;

	ocelot_rmw_gix(ocelot,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG,
		       port);

	for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
		ocelot_rmw_ix(ocelot,
			      (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
			      ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M,
			      ANA_PORT_PCP_DEI_MAP,
			      port, i);
	}
}

static void felix_get_strings(struct dsa_switch *ds, int port,
			      u32 stringset, u8 *data)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_strings(ocelot, port, stringset, data);
}

static void
felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_get_ethtool_stats(ocelot, port, data);
}

static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_sset_count(ocelot, port, sset);
}

static int felix_get_ts_info(struct dsa_switch *ds, int port,
			     struct ethtool_ts_info *info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_ts_info(ocelot, port, info);
}

static int felix_parse_ports_node(struct felix *felix,
				  struct device_node *ports_node,
				  phy_interface_t *port_phy_modes)
{
	struct ocelot *ocelot = &felix->ocelot;
	struct device *dev = felix->ocelot.dev;
	struct device_node *child;

	for_each_available_child_of_node(ports_node, child) {
		phy_interface_t phy_mode;
		u32 port;
		int err;

		/* Get switch port number from DT */
		if (of_property_read_u32(child, "reg", &port) < 0) {
			dev_err(dev, "Port number not defined in device tree "
				"(property \"reg\")\n");
			of_node_put(child);
			return -ENODEV;
		}

		/* Get PHY mode from DT */
		err = of_get_phy_mode(child, &phy_mode);
		if (err) {
			dev_err(dev, "Failed to read phy-mode or "
				"phy-interface-type property for port %d\n",
				port);
			of_node_put(child);
			return -ENODEV;
		}

		err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode);
		if (err < 0) {
			dev_err(dev, "Unsupported PHY mode %s on port %d\n",
				phy_modes(phy_mode), port);
			of_node_put(child);
			return err;
		}

		port_phy_modes[port] = phy_mode;
	}

	return 0;
}

static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
{
	struct device *dev = felix->ocelot.dev;
	struct device_node *switch_node;
	struct device_node *ports_node;
	int err;

	switch_node = dev->of_node;

	ports_node = of_get_child_by_name(switch_node, "ports");
	if (!ports_node)
		ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
	if (!ports_node) {
		dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n");
		return -ENODEV;
	}

	err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
	of_node_put(ports_node);

	return err;
}

static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
	struct ocelot *ocelot = &felix->ocelot;
	phy_interface_t *port_phy_modes;
	struct resource res;
	int port, i, err;

	ocelot->num_phys_ports = num_phys_ports;
	ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
				     sizeof(struct ocelot_port *), GFP_KERNEL);
	if (!ocelot->ports)
		return -ENOMEM;

	ocelot->map = felix->info->map;
	ocelot->stats_layout = felix->info->stats_layout;
	ocelot->num_stats = felix->info->num_stats;
	ocelot->num_mact_rows = felix->info->num_mact_rows;
	ocelot->vcap = felix->info->vcap;
	ocelot->vcap_pol.base = felix->info->vcap_pol_base;
	ocelot->vcap_pol.max = felix->info->vcap_pol_max;
	ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2;
	ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2;
	ocelot->ops = felix->info->ops;
	ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->devlink = felix->ds->devlink;

	port_phy_modes = kcalloc(num_phys_ports,
				 sizeof(phy_interface_t),
				 GFP_KERNEL);
	if (!port_phy_modes)
		return -ENOMEM;

	err = felix_parse_dt(felix, port_phy_modes);
	if (err) {
		kfree(port_phy_modes);
		return err;
	}

	for (i = 0; i < TARGET_MAX; i++) {
		struct regmap *target;

		if (!felix->info->target_io_res[i].name)
			continue;

		memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
		res.flags = IORESOURCE_MEM;
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		target = felix->info->init_regmap(ocelot, &res);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map device memory space\n");
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot->targets[i] = target;
	}

	err = ocelot_regfields_init(ocelot, felix->info->regfields);
	if (err) {
		dev_err(ocelot->dev, "failed to init reg fields map\n");
		kfree(port_phy_modes);
		return err;
	}

	for (port = 0; port < num_phys_ports; port++) {
		struct ocelot_port *ocelot_port;
		struct regmap *target;

		ocelot_port = devm_kzalloc(ocelot->dev,
					   sizeof(struct ocelot_port),
					   GFP_KERNEL);
		if (!ocelot_port) {
			dev_err(ocelot->dev,
				"failed to allocate port memory\n");
			kfree(port_phy_modes);
			return -ENOMEM;
		}

		memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
		res.flags = IORESOURCE_MEM;
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		target = felix->info->init_regmap(ocelot, &res);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map memory space for port %d\n",
				port);
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot_port->phy_mode = port_phy_modes[port];
		ocelot_port->ocelot = ocelot;
		ocelot_port->target = target;
		ocelot->ports[port] = ocelot_port;
	}

	kfree(port_phy_modes);

	if (felix->info->mdio_bus_alloc) {
		err = felix->info->mdio_bus_alloc(ocelot);
		if (err < 0)
			return err;
	}

	return 0;
}

static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
					   struct sk_buff *skb)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
	struct sk_buff *skb_match = NULL, *skb_tmp;
	unsigned long flags;

	if (!clone)
		return;

	spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);

	skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
		if (skb != clone)
			continue;
		__skb_unlink(skb, &ocelot_port->tx_skbs);
		skb_match = skb;
		break;
	}

	spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);

	WARN_ONCE(!skb_match,
		  "Could not find skb clone in TX timestamping list\n");
}
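
/* In ocelot-8021q mode, the tagger defers transmission of frames that need
 * the hardware injection header (for example PTP frames carrying a rewriter
 * op, see ocelot_ptp_rew_op()) to the worker below. It busy-waits for
 * injection group 0 to become available, then injects the frame into the CPU
 * port module over register-based MMIO.
 */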

#define work_to_xmit_work(w) \
		container_of((w), struct felix_deferred_xmit_work, work)

static void felix_port_deferred_xmit(struct kthread_work *work)
{
	struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
	struct dsa_switch *ds = xmit_work->dp->ds;
	struct sk_buff *skb = xmit_work->skb;
	u32 rew_op = ocelot_ptp_rew_op(skb);
	struct ocelot *ocelot = ds->priv;
	int port = xmit_work->dp->index;
	int retries = 10;

	do {
		if (ocelot_can_inject(ocelot, 0))
			break;

		cpu_relax();
	} while (--retries);

	if (!retries) {
		dev_err(ocelot->dev, "port %d failed to inject skb\n",
			port);
		ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
		kfree_skb(skb);
		return;
	}

	ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);

	consume_skb(skb);
	kfree(xmit_work);
}

static int felix_connect_tag_protocol(struct dsa_switch *ds,
				      enum dsa_tag_protocol proto)
{
	struct ocelot_8021q_tagger_data *tagger_data;

	switch (proto) {
	case DSA_TAG_PROTO_OCELOT_8021Q:
		tagger_data = ocelot_8021q_tagger_data(ds);
		tagger_data->xmit_work_fn = felix_port_deferred_xmit;
		return 0;
	case DSA_TAG_PROTO_OCELOT:
	case DSA_TAG_PROTO_SEVILLE:
		return 0;
	default:
		return -EPROTONOSUPPORT;
	}
}

/* Hardware initialization done here so that we can allocate structures with
 * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
 * us to allocate structures twice (leak memory) and map PCI memory twice
 * (which will not work).
 */
static int felix_setup(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int port, err;

	err = felix_init_structs(felix, ds->num_ports);
	if (err)
		return err;

	err = ocelot_init(ocelot);
	if (err)
		goto out_mdiobus_free;

	if (ocelot->ptp) {
		err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
		if (err) {
			dev_err(ocelot->dev,
				"Timestamp initialization failed\n");
			ocelot->ptp = 0;
		}
	}

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		ocelot_init_port(ocelot, port);

		/* Set the default QoS Classification based on PCP and DEI
		 * bits of vlan tag.
		 */
		felix_port_qos_map_init(ocelot, port);
	}

	err = ocelot_devlink_sb_register(ocelot);
	if (err)
		goto out_deinit_ports;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		/* The initial tag protocol is NPI which always returns 0, so
		 * there's no real point in checking for errors.
		 */
		felix_set_tag_protocol(ds, port, felix->tag_proto);
		break;
	}

	ds->mtu_enforcement_ingress = true;
	ds->assisted_learning_on_cpu_port = true;

	return 0;

out_deinit_ports:
	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		ocelot_deinit_port(ocelot, port);
	}

	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

out_mdiobus_free:
	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);

	return err;
}

static void felix_teardown(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		felix_del_tag_protocol(ds, port, felix->tag_proto);
		break;
	}

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		ocelot_deinit_port(ocelot, port);
	}

	ocelot_devlink_sb_unregister(ocelot);
	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);
}

static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_hwstamp_get(ocelot, port, ifr);
}

static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_hwstamp_set(ocelot, port, ifr);
}

static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	int err, grp = 0;

	if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return false;

	if (!felix->info->quirk_no_xtr_irq)
		return false;

	if (ptp_type == PTP_CLASS_NONE)
		return false;

	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
		struct sk_buff *skb;
		unsigned int type;

		err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
		if (err)
			goto out;

		/* We trap to the CPU port module all PTP frames, but
		 * felix_rxtstamp() only gets called for event frames.
		 * So we need to avoid sending duplicate general
		 * message frames by running a second BPF classifier
		 * here and dropping those.
		 */
		__skb_push(skb, ETH_HLEN);

		type = ptp_classify_raw(skb);

		__skb_pull(skb, ETH_HLEN);

		if (type == PTP_CLASS_NONE) {
			kfree_skb(skb);
			continue;
		}

		netif_rx(skb);
	}

out:
	if (err < 0)
		ocelot_drain_cpu_queue(ocelot, 0);

	return true;
}
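
/* The extraction frame header only carries the low 32 bits of the RX
 * timestamp (tstamp_lo). felix_rxtstamp() below reconstructs the full 64-bit
 * value by reading the current PTP time and decrementing the upper 32 bits
 * when the lower half has wrapped since the frame was timestamped.
 */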

static bool felix_rxtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb, unsigned int type)
{
	u32 tstamp_lo = OCELOT_SKB_CB(skb)->tstamp_lo;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ocelot *ocelot = ds->priv;
	struct timespec64 ts;
	u32 tstamp_hi;
	u64 tstamp;

	/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
	 * for RX timestamping. Then free it, and poll for its copy through
	 * MMIO in the CPU port module, and inject that into the stack from
	 * ocelot_xtr_poll().
	 */
	if (felix_check_xtr_pkt(ocelot, type)) {
		kfree_skb(skb);
		return true;
	}

	ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
	tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);

	tstamp_hi = tstamp >> 32;
	if ((tstamp & 0xffffffff) < tstamp_lo)
		tstamp_hi--;

	tstamp = ((u64)tstamp_hi << 32) | tstamp_lo;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = tstamp;
	return false;
}

static void felix_txtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb)
{
	struct ocelot *ocelot = ds->priv;
	struct sk_buff *clone = NULL;

	if (!ocelot->ptp)
		return;

	if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
		dev_err_ratelimited(ds->dev,
				    "port %d delivering skb without TX timestamp\n",
				    port);
		return;
	}

	if (clone)
		OCELOT_SKB_CB(skb)->clone = clone;
}

static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_set_maxlen(ocelot, port, new_mtu);

	return 0;
}

static int felix_get_max_mtu(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_max_mtu(ocelot, port);
}

static int felix_cls_flower_add(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
}

static int felix_cls_flower_del(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_destroy(ocelot, port, cls, ingress);
}

static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
				  struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_stats(ocelot, port, cls, ingress);
}

static int felix_port_policer_add(struct dsa_switch *ds, int port,
				  struct dsa_mall_policer_tc_entry *policer)
{
	struct ocelot *ocelot = ds->priv;
	struct ocelot_policer pol = {
		.rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
		.burst = policer->burst,
	};

	return ocelot_port_policer_add(ocelot, port, &pol);
}

static void felix_port_policer_del(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_policer_del(ocelot, port);
}

static int felix_port_setup_tc(struct dsa_switch *ds, int port,
			       enum tc_setup_type type,
			       void *type_data)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->port_setup_tc)
		return felix->info->port_setup_tc(ds, port, type, type_data);
	else
		return -EOPNOTSUPP;
}

static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index,
			     struct devlink_sb_pool_info *pool_info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
}

static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index, u32 size,
			     enum devlink_sb_threshold_type threshold_type,
			     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
				  threshold_type, extack);
}

static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
				       p_threshold);
}

static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 threshold, struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
				       threshold, extack);
}

static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 *p_pool_index, u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
					  pool_type, p_pool_index,
					  p_threshold);
}

static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 pool_index, u32 threshold,
				     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
					  pool_type, pool_index, threshold,
					  extack);
}

static int felix_sb_occ_snapshot(struct dsa_switch *ds,
				 unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_snapshot(ocelot, sb_index);
}

static int felix_sb_occ_max_clear(struct dsa_switch *ds,
				  unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_max_clear(ocelot, sb_index);
}

static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
				      unsigned int sb_index, u16 pool_index,
				      u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
					   p_cur, p_max);
}

static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
					 unsigned int sb_index, u16 tc_index,
					 enum devlink_sb_pool_type pool_type,
					 u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index,
					      pool_type, p_cur, p_max);
}

static int felix_mrp_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_add(ocelot, port, mrp);
}

static int felix_mrp_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_del(ocelot, port, mrp);
}

static int
felix_mrp_add_ring_role(struct dsa_switch *ds, int port,
			const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_add_ring_role(ocelot, port, mrp);
}

static int
felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
			const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}

const struct dsa_switch_ops felix_switch_ops = {
	.get_tag_protocol = felix_get_tag_protocol,
	.change_tag_protocol = felix_change_tag_protocol,
	.connect_tag_protocol = felix_connect_tag_protocol,
	.setup = felix_setup,
	.teardown = felix_teardown,
	.set_ageing_time = felix_set_ageing_time,
	.get_strings = felix_get_strings,
	.get_ethtool_stats = felix_get_ethtool_stats,
	.get_sset_count = felix_get_sset_count,
	.get_ts_info = felix_get_ts_info,
	.phylink_validate = felix_phylink_validate,
	.phylink_mac_config = felix_phylink_mac_config,
	.phylink_mac_link_down = felix_phylink_mac_link_down,
	.phylink_mac_link_up = felix_phylink_mac_link_up,
	.port_fast_age = felix_port_fast_age,
	.port_fdb_dump = felix_fdb_dump,
	.port_fdb_add = felix_fdb_add,
	.port_fdb_del = felix_fdb_del,
	.port_mdb_add = felix_mdb_add,
	.port_mdb_del = felix_mdb_del,
	.port_pre_bridge_flags = felix_pre_bridge_flags,
	.port_bridge_flags = felix_bridge_flags,
	.port_bridge_join = felix_bridge_join,
	.port_bridge_leave = felix_bridge_leave,
	.port_lag_join = felix_lag_join,
	.port_lag_leave = felix_lag_leave,
	.port_lag_change = felix_lag_change,
	.port_stp_state_set = felix_bridge_stp_state_set,
	.port_vlan_filtering = felix_vlan_filtering,
	.port_vlan_add = felix_vlan_add,
	.port_vlan_del = felix_vlan_del,
	.port_hwtstamp_get = felix_hwtstamp_get,
	.port_hwtstamp_set = felix_hwtstamp_set,
	.port_rxtstamp = felix_rxtstamp,
	.port_txtstamp = felix_txtstamp,
	.port_change_mtu = felix_change_mtu,
	.port_max_mtu = felix_get_max_mtu,
	.port_policer_add = felix_port_policer_add,
	.port_policer_del = felix_port_policer_del,
	.cls_flower_add = felix_cls_flower_add,
	.cls_flower_del = felix_cls_flower_del,
	.cls_flower_stats = felix_cls_flower_stats,
	.port_setup_tc = felix_port_setup_tc,
	.devlink_sb_pool_get = felix_sb_pool_get,
	.devlink_sb_pool_set = felix_sb_pool_set,
	.devlink_sb_port_pool_get = felix_sb_port_pool_get,
	.devlink_sb_port_pool_set = felix_sb_port_pool_set,
	.devlink_sb_tc_pool_bind_get = felix_sb_tc_pool_bind_get,
	.devlink_sb_tc_pool_bind_set = felix_sb_tc_pool_bind_set,
	.devlink_sb_occ_snapshot = felix_sb_occ_snapshot,
	.devlink_sb_occ_max_clear = felix_sb_occ_max_clear,
	.devlink_sb_occ_port_pool_get = felix_sb_occ_port_pool_get,
	.devlink_sb_occ_tc_port_bind_get = felix_sb_occ_tc_port_bind_get,
	.port_mrp_add = felix_mrp_add,
	.port_mrp_del = felix_mrp_del,
	.port_mrp_add_ring_role = felix_mrp_add_ring_role,
	.port_mrp_del_ring_role = felix_mrp_del_ring_role,
	.tag_8021q_vlan_add = felix_tag_8021q_vlan_add,
	.tag_8021q_vlan_del = felix_tag_8021q_vlan_del,
};

struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_switch *ds = felix->ds;

	if (!dsa_is_user_port(ds, port))
		return NULL;

	return dsa_to_port(ds, port)->slave;
}

int felix_netdev_to_port(struct net_device *dev)
{
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(dev);
	if (IS_ERR(dp))
		return -EINVAL;

	return dp->index;
}