// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019-2021 NXP Semiconductors
 *
 * This is an umbrella module for all network switches that are
 * register-compatible with Ocelot and that perform I/O to their host CPU
 * through an NPI (Node Processor Interface) Ethernet port.
 */
#include <uapi/linux/if_bridge.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot.h>
#include <linux/dsa/8021q.h>
#include <linux/dsa/ocelot.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"

static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
				      bool pvid, bool untagged)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int key_length, upstream, err;

	/* We don't need to install the rxvlan into the other ports' filtering
	 * tables, because we're just pushing the rxvlan when sending towards
	 * the CPU
	 */
	if (!pvid)
		return 0;

	key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
	upstream = dsa_upstream_port(ds, port);

	outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
				     GFP_KERNEL);
	if (!outer_tagging_rule)
		return -ENOMEM;

	outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	outer_tagging_rule->prio = 1;
	outer_tagging_rule->id.cookie = port;
	outer_tagging_rule->id.tc_offload = false;
	outer_tagging_rule->block_id = VCAP_ES0;
	outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	outer_tagging_rule->lookup = 0;
	outer_tagging_rule->ingress_port.value = port;
	outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->egress_port.value = upstream;
	outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG;
	outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
	outer_tagging_rule->action.tag_a_vid_sel = 1;
	outer_tagging_rule->action.vid_a_val = vid;

	err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
	if (err)
		kfree(outer_tagging_rule);

	return err;
}

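/* The TX (towards a user port) direction is handled with a pair of chained
 * VCAP rules, installed below:
 *
 * - an IS1 rule on the upstream (CPU-facing) port matches the tag_8021q TX
 *   VLAN, pops that tag and overrides the PAG (Policy Association Group)
 *   with the number of the target port;
 * - an IS2 rule keyed on that PAG then redirects the untagged frame to the
 *   target port.
 */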
static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
				      bool pvid, bool untagged)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int upstream, err;

	/* tag_8021q.c assumes we are implementing this via port VLAN
	 * membership, which we aren't. So we don't need to add any VCAP filter
	 * for the CPU port.
	 */
	if (ocelot->ports[port]->is_dsa_8021q_cpu)
		return 0;

	untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!untagging_rule)
		return -ENOMEM;

	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!redirect_rule) {
		kfree(untagging_rule);
		return -ENOMEM;
	}

	upstream = dsa_upstream_port(ds, port);

	untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	untagging_rule->ingress_port_mask = BIT(upstream);
	untagging_rule->vlan.vid.value = vid;
	untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
	untagging_rule->prio = 1;
	untagging_rule->id.cookie = port;
	untagging_rule->id.tc_offload = false;
	untagging_rule->block_id = VCAP_IS1;
	untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	untagging_rule->lookup = 0;
	untagging_rule->action.vlan_pop_cnt_ena = true;
	untagging_rule->action.vlan_pop_cnt = 1;
	untagging_rule->action.pag_override_mask = 0xff;
	untagging_rule->action.pag_val = port;

	err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL);
	if (err) {
		kfree(untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
	redirect_rule->ingress_port_mask = BIT(upstream);
	redirect_rule->pag = port;
	redirect_rule->prio = 1;
	redirect_rule->id.cookie = port;
	redirect_rule->id.tc_offload = false;
	redirect_rule->block_id = VCAP_IS2;
	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	redirect_rule->lookup = 0;
	redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
	redirect_rule->action.port_mask = BIT(port);

	err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
	if (err) {
		ocelot_vcap_filter_del(ocelot, untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	return 0;
}

static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
				    u16 flags)
{
	bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
	struct ocelot *ocelot = ds->priv;

	if (vid_is_dsa_8021q_rxvlan(vid))
		return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot),
						  port, vid, pvid, untagged);

	if (vid_is_dsa_8021q_txvlan(vid))
		return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot),
						  port, vid, pvid, untagged);

	return 0;
}

static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot_vcap_block *block_vcap_es0;
	struct ocelot *ocelot = &felix->ocelot;

	block_vcap_es0 = &ocelot->block[VCAP_ES0];

	outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
								 port, false);
	/* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid
	 * installing outer tagging ES0 rules where they weren't needed.
	 * But in rxvlan_del, the API doesn't give us the "flags" anymore,
	 * so that forces us to be slightly sloppy here, and just assume that
	 * if we didn't find an outer_tagging_rule it means that there was
	 * none in the first place, i.e. rxvlan_del is called on a non-pvid
	 * port. This is most probably true though.
	 */
	if (!outer_tagging_rule)
		return 0;

	return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
}

static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	struct ocelot_vcap_block *block_vcap_is1;
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot *ocelot = &felix->ocelot;
	int err;

	if (ocelot->ports[port]->is_dsa_8021q_cpu)
		return 0;

	block_vcap_is1 = &ocelot->block[VCAP_IS1];
	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
							     port, false);
	if (!untagging_rule)
		return 0;

	err = ocelot_vcap_filter_del(ocelot, untagging_rule);
	if (err)
		return err;

	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
							    port, false);
	if (!redirect_rule)
		return 0;

	return ocelot_vcap_filter_del(ocelot, redirect_rule);
}

static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	if (vid_is_dsa_8021q_rxvlan(vid))
		return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot),
						  port, vid);

	if (vid_is_dsa_8021q_txvlan(vid))
		return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot),
						  port, vid);

	return 0;
}

/* Alternatively to using the NPI functionality, that same hardware MAC
 * connected internally to the enetc or fman DSA master can be configured to
 * use the software-defined tag_8021q frame format. As far as the hardware is
 * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
 * module are now disconnected from it, but can still be accessed through
 * register-based MMIO.
 */
static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
{
	ocelot->ports[port]->is_dsa_8021q_cpu = true;
	ocelot->npi = -1;

	/* Overwrite PGID_CPU with the non-tagging port */
	ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);

	ocelot_apply_bridge_fwd_mask(ocelot);
}

static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
{
	ocelot->ports[port]->is_dsa_8021q_cpu = false;

	/* Restore PGID_CPU */
	ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
			 PGID_CPU);

	ocelot_apply_bridge_fwd_mask(ocelot);
}

/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
 * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
 * tag_8021q CPU port.
 */
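/* The PTP trap reuses the IS1 + IS2 chaining scheme from the tag_8021q TX
 * VLANs above: an IS1 rule matching the PTP EtherType (0x88f7) on all user
 * ports overrides the PAG, and an IS2 rule keyed on that PAG copies the
 * frames to the CPU port module and, depending on quirk_no_xtr_irq, also
 * redirects them to the tag_8021q CPU port.
 */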
static int felix_setup_mmio_filtering(struct felix *felix)
{
	unsigned long user_ports = 0, cpu_ports = 0;
	struct ocelot_vcap_filter *redirect_rule;
	struct ocelot_vcap_filter *tagging_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int port, ret;

	tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!tagging_rule)
		return -ENOMEM;

	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!redirect_rule) {
		kfree(tagging_rule);
		return -ENOMEM;
	}

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_user_port(ds, port))
			user_ports |= BIT(port);
		if (dsa_is_cpu_port(ds, port))
			cpu_ports |= BIT(port);
	}

	tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
	*(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
	*(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
	tagging_rule->ingress_port_mask = user_ports;
	tagging_rule->prio = 1;
	tagging_rule->id.cookie = ocelot->num_phys_ports;
	tagging_rule->id.tc_offload = false;
	tagging_rule->block_id = VCAP_IS1;
	tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	tagging_rule->lookup = 0;
	tagging_rule->action.pag_override_mask = 0xff;
	tagging_rule->action.pag_val = ocelot->num_phys_ports;

	ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
	if (ret) {
		kfree(tagging_rule);
		kfree(redirect_rule);
		return ret;
	}

	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
	redirect_rule->ingress_port_mask = user_ports;
	redirect_rule->pag = ocelot->num_phys_ports;
	redirect_rule->prio = 1;
	redirect_rule->id.cookie = ocelot->num_phys_ports;
	redirect_rule->id.tc_offload = false;
	redirect_rule->block_id = VCAP_IS2;
	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	redirect_rule->lookup = 0;
	redirect_rule->action.cpu_copy_ena = true;
	if (felix->info->quirk_no_xtr_irq) {
		/* Redirect to the tag_8021q CPU but also copy PTP packets to
		 * the CPU port module
		 */
		redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
		redirect_rule->action.port_mask = cpu_ports;
	} else {
		/* Trap PTP packets only to the CPU port module (which is
		 * redirected to the NPI port)
		 */
		redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
		redirect_rule->action.port_mask = 0;
	}

	ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
	if (ret) {
		ocelot_vcap_filter_del(ocelot, tagging_rule);
		kfree(redirect_rule);
		return ret;
	}

	/* The ownership of the CPU port module's queues might have just been
	 * transferred to the tag_8021q tagger from the NPI-based tagger.
	 * So there might still be all sorts of crap in the queues. On the
	 * other hand, the MMIO-based matching of PTP frames is very brittle,
	 * so we need to be careful that there are no extra frames to be
	 * dequeued over MMIO, since we would never know to discard them.
	 */
	ocelot_drain_cpu_queue(ocelot, 0);

	return 0;
}

static int felix_teardown_mmio_filtering(struct felix *felix)
{
	struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
	struct ocelot_vcap_block *block_vcap_is1;
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot *ocelot = &felix->ocelot;
	int err;

	block_vcap_is1 = &ocelot->block[VCAP_IS1];
	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
							   ocelot->num_phys_ports,
							   false);
	if (!tagging_rule)
		return -ENOENT;

	err = ocelot_vcap_filter_del(ocelot, tagging_rule);
	if (err)
		return err;

	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
							    ocelot->num_phys_ports,
							    false);
	if (!redirect_rule)
		return -ENOENT;

	return ocelot_vcap_filter_del(ocelot, redirect_rule);
}

static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	unsigned long cpu_flood;
	int port, err;

	felix_8021q_cpu_port_init(ocelot, cpu);

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		/* This overwrites ocelot_init():
		 * Do not forward BPDU frames to the CPU port module,
		 * for 2 reasons:
		 * - When these packets are injected from the tag_8021q
		 *   CPU port, we want them to go out, not loop back
		 *   into the system.
		 * - STP traffic ingressing on a user port should go to
		 *   the tag_8021q CPU port, not to the hardware CPU
		 *   port module.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
				 ANA_PORT_CPU_FWD_BPDU_CFG, port);
	}

	/* In tag_8021q mode, the CPU port module is unused, except for PTP
	 * frames. So we want to disable flooding of any kind to the CPU port
	 * module, since packets going there will end in a black hole.
	 */
	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);

	err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
	if (err)
		return err;

	err = felix_setup_mmio_filtering(felix);
	if (err)
		goto out_tag_8021q_unregister;

	return 0;

out_tag_8021q_unregister:
	dsa_tag_8021q_unregister(ds);
	return err;
}

static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int err, port;

	err = felix_teardown_mmio_filtering(felix);
	if (err)
		dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
			err);

	dsa_tag_8021q_unregister(ds);

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		/* Restore the logic from ocelot_init:
		 * do not forward BPDU frames to the front ports.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
				 ANA_PORT_CPU_FWD_BPDU_CFG,
				 port);
	}

	felix_8021q_cpu_port_deinit(ocelot, cpu);
}

/* The CPU port module is connected to the Node Processor Interface (NPI). This
 * is the mode through which frames can be injected from and extracted to an
 * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
 * running Linux, and this forms a DSA setup together with the enetc or fman
 * DSA master.
 */
static void felix_npi_port_init(struct ocelot *ocelot, int port)
{
	ocelot->npi = port;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
		     QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
		     QSYS_EXT_CPU_CFG);

	/* NPI port Injection/Extraction configuration */
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    ocelot->npi_xtr_prefix);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    ocelot->npi_inj_prefix);

	/* Disable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
}

static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
{
	/* Restore hardware defaults */
	int unused_port = ocelot->num_phys_ports + 2;

	ocelot->npi = -1;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
		     QSYS_EXT_CPU_CFG);

	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);

	/* Enable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
}

static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	unsigned long cpu_flood;

	felix_npi_port_init(ocelot, cpu);

	/* Include the CPU port module (and indirectly, the NPI port)
	 * in the forwarding mask for unknown unicast - the hardware
	 * default value for ANA_FLOODING_FLD_UNICAST excludes
	 * BIT(ocelot->num_phys_ports), and so does ocelot_init,
	 * since Ocelot relies on whitelisting MAC addresses towards
	 * PGID_CPU.
	 * We do this because DSA does not yet perform RX filtering,
	 * and the NPI port does not perform source address learning,
	 * so traffic sent to Linux is effectively unknown from the
	 * switch's perspective.
	 */
	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC);
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC);

	return 0;
}

static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;

	felix_npi_port_deinit(ocelot, cpu);
}

static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu,
				  enum dsa_tag_protocol proto)
{
	int err;

	switch (proto) {
	case DSA_TAG_PROTO_SEVILLE:
	case DSA_TAG_PROTO_OCELOT:
		err = felix_setup_tag_npi(ds, cpu);
		break;
	case DSA_TAG_PROTO_OCELOT_8021Q:
		err = felix_setup_tag_8021q(ds, cpu);
		break;
	default:
		err = -EPROTONOSUPPORT;
	}

	return err;
}

static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu,
				   enum dsa_tag_protocol proto)
{
	switch (proto) {
	case DSA_TAG_PROTO_SEVILLE:
	case DSA_TAG_PROTO_OCELOT:
		felix_teardown_tag_npi(ds, cpu);
		break;
	case DSA_TAG_PROTO_OCELOT_8021Q:
		felix_teardown_tag_8021q(ds, cpu);
		break;
	default:
		break;
	}
}

/* This always leaves the switch in a consistent state, because although the
 * tag_8021q setup can fail, the NPI setup can't. So either the change is made,
 * or the restoration is guaranteed to work.
 */
static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
				     enum dsa_tag_protocol proto)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	enum dsa_tag_protocol old_proto = felix->tag_proto;
	int err;

	if (proto != DSA_TAG_PROTO_SEVILLE &&
	    proto != DSA_TAG_PROTO_OCELOT &&
	    proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return -EPROTONOSUPPORT;

	felix_del_tag_protocol(ds, cpu, old_proto);

	err = felix_set_tag_protocol(ds, cpu, proto);
	if (err) {
		felix_set_tag_protocol(ds, cpu, old_proto);
		return err;
	}

	felix->tag_proto = proto;

	return 0;
}

static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
						    int port,
						    enum dsa_tag_protocol mp)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	return felix->tag_proto;
}

static int felix_set_ageing_time(struct dsa_switch *ds,
				 unsigned int ageing_time)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_set_ageing_time(ocelot, ageing_time);

	return 0;
}

static int felix_fdb_dump(struct dsa_switch *ds, int port,
			  dsa_fdb_dump_cb_t *cb, void *data)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_dump(ocelot, port, cb, data);
}

static int felix_fdb_add(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_add(ocelot, port, addr, vid);
}

static int felix_fdb_del(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_del(ocelot, port, addr, vid);
}

static int felix_mdb_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_mdb_add(ocelot, port, mdb);
}

static int felix_mdb_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_mdb_del(ocelot, port, mdb);
}

static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_bridge_stp_state_set(ocelot, port, state);
}

static int felix_pre_bridge_flags(struct dsa_switch *ds, int port,
				  struct switchdev_brport_flags val,
				  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_pre_bridge_flags(ocelot, port, val);
}

static int felix_bridge_flags(struct dsa_switch *ds, int port,
			      struct switchdev_brport_flags val,
			      struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_flags(ocelot, port, val);

	return 0;
}

static int felix_bridge_join(struct dsa_switch *ds, int port,
			     struct net_device *br)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_join(ocelot, port, br);

	return 0;
}

static void felix_bridge_leave(struct dsa_switch *ds, int port,
			       struct net_device *br)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_leave(ocelot, port, br);
}

static int felix_lag_join(struct dsa_switch *ds, int port,
			  struct net_device *bond,
			  struct netdev_lag_upper_info *info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_lag_join(ocelot, port, bond, info);
}

static int felix_lag_leave(struct dsa_switch *ds, int port,
			   struct net_device *bond)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_lag_leave(ocelot, port, bond);

	return 0;
}

static int felix_lag_change(struct dsa_switch *ds, int port)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct ocelot *ocelot = ds->priv;

	ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled);

	return 0;
}

static int felix_vlan_prepare(struct dsa_switch *ds, int port,
			      const struct switchdev_obj_port_vlan *vlan)
{
	struct ocelot *ocelot = ds->priv;
	u16 flags = vlan->flags;

	/* Ocelot switches copy frames as-is to the CPU, so the flags:
	 * egress-untagged or not, pvid or not, make no difference. This
	 * behavior is already better than what DSA just tries to approximate
	 * when it installs the VLAN with the same flags on the CPU port.
	 * Just accept any configuration, and don't let ocelot deny installing
	 * multiple native VLANs on the NPI port, because the switch doesn't
	 * look at the port tag settings towards the NPI interface anyway.
	 */
	if (port == ocelot->npi)
		return 0;

	return ocelot_vlan_prepare(ocelot, port, vlan->vid,
				   flags & BRIDGE_VLAN_INFO_PVID,
				   flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
				struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_vlan_filtering(ocelot, port, enabled);
}

static int felix_vlan_add(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	u16 flags = vlan->flags;
	int err;

	err = felix_vlan_prepare(ds, port, vlan);
	if (err)
		return err;

	return ocelot_vlan_add(ocelot, port, vlan->vid,
			       flags & BRIDGE_VLAN_INFO_PVID,
			       flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

static int felix_vlan_del(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_vlan_del(ocelot, port, vlan->vid);
}

static int felix_port_enable(struct dsa_switch *ds, int port,
			     struct phy_device *phy)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_enable(ocelot, port, phy);

	return 0;
}

static void felix_port_disable(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_disable(ocelot, port);
}

static void felix_phylink_validate(struct dsa_switch *ds, int port,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->phylink_validate)
		felix->info->phylink_validate(ocelot, port, supported, state);
}

static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
				     unsigned int link_an_mode,
				     const struct phylink_link_state *state)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (felix->pcs[port])
		phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs);
}

static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
					unsigned int link_an_mode,
					phy_interface_t interface)
{
	struct ocelot *ocelot = ds->priv;
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	int err;

	ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
			 DEV_MAC_ENA_CFG);

	ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);

	err = ocelot_port_flush(ocelot, port);
	if (err)
		dev_err(ocelot->dev, "failed to flush port %d: %d\n",
			port, err);

	/* Put the port in reset. */
	ocelot_port_writel(ocelot_port,
			   DEV_CLOCK_CFG_MAC_TX_RST |
			   DEV_CLOCK_CFG_MAC_RX_RST |
			   DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000),
			   DEV_CLOCK_CFG);
}

static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int link_an_mode,
				      phy_interface_t interface,
				      struct phy_device *phydev,
				      int speed, int duplex,
				      bool tx_pause, bool rx_pause)
{
	struct ocelot *ocelot = ds->priv;
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	struct felix *felix = ocelot_to_felix(ocelot);
	u32 mac_fc_cfg;

	/* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and
	 * PORT_RST bits in DEV_CLOCK_CFG. Note that the way this system is
	 * integrated is that the MAC speed is fixed and it's the PCS who is
	 * performing the rate adaptation, so we have to write "1000Mbps" into
	 * the LINK_SPEED field of DEV_CLOCK_CFG (which is also its default
	 * value).
	 */
	ocelot_port_writel(ocelot_port,
			   DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000),
			   DEV_CLOCK_CFG);

	switch (speed) {
	case SPEED_10:
		mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(3);
		break;
	case SPEED_100:
		mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(2);
		break;
	case SPEED_1000:
	case SPEED_2500:
		mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(1);
		break;
	default:
		dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n",
			port, speed);
		return;
	}

	/* handle Rx pause in all cases, with 2500base-X this is used for rate
	 * adaptation.
	 */
	mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA;

	if (tx_pause)
		mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA |
			      SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
			      SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
			      SYS_MAC_FC_CFG_ZERO_PAUSE_ENA;

	/* Flow control. Link speed is only used here to evaluate the time
	 * specification in incoming pause frames.
	 */
	ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port);

	ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);

	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause);

	/* Undo the effects of felix_phylink_mac_link_down:
	 * enable MAC module
	 */
	ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
			   DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);

	/* Enable receiving frames on the port, and activate auto-learning of
	 * MAC addresses.
	 */
	ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
			 ANA_PORT_PORT_CFG_RECV_ENA |
			 ANA_PORT_PORT_CFG_PORTID_VAL(port),
			 ANA_PORT_PORT_CFG, port);

	/* Core: Enable port for frame transfer */
	ocelot_fields_write(ocelot, port,
			    QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);

	if (felix->info->port_sched_speed_set)
		felix->info->port_sched_speed_set(ocelot, port, speed);
}

static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
	int i;

	ocelot_rmw_gix(ocelot,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG,
		       port);

	for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
		ocelot_rmw_ix(ocelot,
			      (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
			      ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M,
			      ANA_PORT_PCP_DEI_MAP,
			      port, i);
	}
}

static void felix_get_strings(struct dsa_switch *ds, int port,
			      u32 stringset, u8 *data)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_strings(ocelot, port, stringset, data);
}

static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_get_ethtool_stats(ocelot, port, data);
}

static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_sset_count(ocelot, port, sset);
}

static int felix_get_ts_info(struct dsa_switch *ds, int port,
			     struct ethtool_ts_info *info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_ts_info(ocelot, port, info);
}

static int felix_parse_ports_node(struct felix *felix,
				  struct device_node *ports_node,
				  phy_interface_t *port_phy_modes)
{
	struct ocelot *ocelot = &felix->ocelot;
	struct device *dev = felix->ocelot.dev;
	struct device_node *child;

	for_each_available_child_of_node(ports_node, child) {
		phy_interface_t phy_mode;
		u32 port;
		int err;

		/* Get switch port number from DT */
		if (of_property_read_u32(child, "reg", &port) < 0) {
			dev_err(dev, "Port number not defined in device tree "
				"(property \"reg\")\n");
			of_node_put(child);
			return -ENODEV;
		}

		/* Get PHY mode from DT */
		err = of_get_phy_mode(child, &phy_mode);
		if (err) {
			dev_err(dev, "Failed to read phy-mode or "
				"phy-interface-type property for port %d\n",
				port);
			of_node_put(child);
			return -ENODEV;
		}

		err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode);
		if (err < 0) {
			dev_err(dev, "Unsupported PHY mode %s on port %d\n",
				phy_modes(phy_mode), port);
			of_node_put(child);
			return err;
		}

		port_phy_modes[port] = phy_mode;
	}

	return 0;
}

static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
{
	struct device *dev = felix->ocelot.dev;
	struct device_node *switch_node;
	struct device_node *ports_node;
	int err;

	switch_node = dev->of_node;

	ports_node = of_get_child_by_name(switch_node, "ports");
	if (!ports_node) {
		dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
		return -ENODEV;
	}

	err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
	of_node_put(ports_node);

	return err;
}

static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
	struct ocelot *ocelot = &felix->ocelot;
	phy_interface_t *port_phy_modes;
	struct resource res;
	int port, i, err;

	ocelot->num_phys_ports = num_phys_ports;
	ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
				     sizeof(struct ocelot_port *), GFP_KERNEL);
	if (!ocelot->ports)
		return -ENOMEM;

	ocelot->map = felix->info->map;
	ocelot->stats_layout = felix->info->stats_layout;
	ocelot->num_stats = felix->info->num_stats;
	ocelot->num_mact_rows = felix->info->num_mact_rows;
	ocelot->vcap = felix->info->vcap;
	ocelot->ops = felix->info->ops;
	ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->devlink = felix->ds->devlink;

	port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
				 GFP_KERNEL);
	if (!port_phy_modes)
		return -ENOMEM;

	err = felix_parse_dt(felix, port_phy_modes);
	if (err) {
		kfree(port_phy_modes);
		return err;
	}

	for (i = 0; i < TARGET_MAX; i++) {
		struct regmap *target;

		if (!felix->info->target_io_res[i].name)
			continue;

		memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
		res.flags = IORESOURCE_MEM;
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		target = ocelot_regmap_init(ocelot, &res);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map device memory space\n");
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot->targets[i] = target;
	}

	err = ocelot_regfields_init(ocelot, felix->info->regfields);
	if (err) {
		dev_err(ocelot->dev, "failed to init reg fields map\n");
		kfree(port_phy_modes);
		return err;
	}

	for (port = 0; port < num_phys_ports; port++) {
		struct ocelot_port *ocelot_port;
		struct regmap *target;

		ocelot_port = devm_kzalloc(ocelot->dev,
					   sizeof(struct ocelot_port),
					   GFP_KERNEL);
		if (!ocelot_port) {
			dev_err(ocelot->dev,
				"failed to allocate port memory\n");
			kfree(port_phy_modes);
			return -ENOMEM;
		}

		memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
		res.flags = IORESOURCE_MEM;
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		target = ocelot_regmap_init(ocelot, &res);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map memory space for port %d\n",
				port);
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot_port->phy_mode = port_phy_modes[port];
		ocelot_port->ocelot = ocelot;
		ocelot_port->target = target;
		ocelot->ports[port] = ocelot_port;
	}

	kfree(port_phy_modes);

	if (felix->info->mdio_bus_alloc) {
		err = felix->info->mdio_bus_alloc(ocelot);
		if (err < 0)
			return err;
	}

	return 0;
}

/* Hardware initialization done here so that we can allocate structures with
 * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
 * us to allocate structures twice (leak memory) and map PCI memory twice
 * (which will not work).
 */
static int felix_setup(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int port, err;

	err = felix_init_structs(felix, ds->num_ports);
	if (err)
		return err;

	err = ocelot_init(ocelot);
	if (err)
		goto out_mdiobus_free;

	if (ocelot->ptp) {
		err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
		if (err) {
			dev_err(ocelot->dev,
				"Timestamp initialization failed\n");
			ocelot->ptp = 0;
		}
	}

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		ocelot_init_port(ocelot, port);

		/* Set the default QoS Classification based on PCP and DEI
		 * bits of vlan tag.
		 */
		felix_port_qos_map_init(ocelot, port);
	}

	err = ocelot_devlink_sb_register(ocelot);
	if (err)
		goto out_deinit_ports;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		/* The initial tag protocol is NPI which always returns 0, so
		 * there's no real point in checking for errors.
		 */
		felix_set_tag_protocol(ds, port, felix->tag_proto);
	}

	ds->mtu_enforcement_ingress = true;
	ds->assisted_learning_on_cpu_port = true;

	return 0;

out_deinit_ports:
	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		ocelot_deinit_port(ocelot, port);
	}

	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

out_mdiobus_free:
	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);

	return err;
}

static void felix_teardown(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		felix_del_tag_protocol(ds, port, felix->tag_proto);
	}

	ocelot_devlink_sb_unregister(ocelot);
	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		ocelot_deinit_port(ocelot, port);
	}

	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);
}

static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_hwstamp_get(ocelot, port, ifr);
}

static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_hwstamp_set(ocelot, port, ifr);
}

static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	int err = 0, grp = 0;

	if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return false;

	if (!felix->info->quirk_no_xtr_irq)
		return false;

	if (ptp_type == PTP_CLASS_NONE)
		return false;

	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
		struct sk_buff *skb;
		unsigned int type;

		err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
		if (err)
			goto out;

		/* We trap to the CPU port module all PTP frames, but
		 * felix_rxtstamp() only gets called for event frames.
		 * So we need to avoid sending duplicate general
		 * message frames by running a second BPF classifier
		 * here and dropping those.
		 */
		__skb_push(skb, ETH_HLEN);

		type = ptp_classify_raw(skb);

		__skb_pull(skb, ETH_HLEN);

		if (type == PTP_CLASS_NONE) {
			kfree_skb(skb);
			continue;
		}

		netif_rx(skb);
	}

out:
	if (err < 0)
		ocelot_drain_cpu_queue(ocelot, 0);

	return true;
}

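/* The extraction header only carries the lower 32 bits of the RX timestamp
 * (in its REW_VAL field). The full 64-bit timestamp is reconstructed below
 * from the current PTP time read over MMIO, with a correction for the case
 * where the lower 32 bits have already wrapped around since the frame was
 * timestamped.
 */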
static bool felix_rxtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb, unsigned int type)
{
	u8 *extraction = skb->data - ETH_HLEN - OCELOT_TAG_LEN;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ocelot *ocelot = ds->priv;
	u32 tstamp_lo, tstamp_hi;
	struct timespec64 ts;
	u64 tstamp, val;

	/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
	 * for RX timestamping. Then free it, and poll for its copy through
	 * MMIO in the CPU port module, and inject that into the stack from
	 * ocelot_xtr_poll().
	 */
	if (felix_check_xtr_pkt(ocelot, type)) {
		kfree_skb(skb);
		return true;
	}

	ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
	tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);

	ocelot_xfh_get_rew_val(extraction, &val);
	tstamp_lo = (u32)val;

	tstamp_hi = tstamp >> 32;
	if ((tstamp & 0xffffffff) < tstamp_lo)
		tstamp_hi--;

	tstamp = ((u64)tstamp_hi << 32) | tstamp_lo;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = tstamp;
	return false;
}

static void felix_txtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb)
{
	struct ocelot *ocelot = ds->priv;
	struct sk_buff *clone = NULL;

	if (!ocelot->ptp)
		return;

	if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone))
		return;

	if (clone)
		OCELOT_SKB_CB(skb)->clone = clone;
}

static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_set_maxlen(ocelot, port, new_mtu);

	return 0;
}

static int felix_get_max_mtu(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_max_mtu(ocelot, port);
}

static int felix_cls_flower_add(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
}

static int felix_cls_flower_del(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_destroy(ocelot, port, cls, ingress);
}

static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
				  struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_stats(ocelot, port, cls, ingress);
}

static int felix_port_policer_add(struct dsa_switch *ds, int port,
				  struct dsa_mall_policer_tc_entry *policer)
{
	struct ocelot *ocelot = ds->priv;
	struct ocelot_policer pol = {
		.rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
		.burst = policer->burst,
	};

	return ocelot_port_policer_add(ocelot, port, &pol);
}

static void felix_port_policer_del(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_policer_del(ocelot, port);
}

static int felix_port_setup_tc(struct dsa_switch *ds, int port,
			       enum tc_setup_type type,
			       void *type_data)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->port_setup_tc)
		return felix->info->port_setup_tc(ds, port, type, type_data);
	else
		return -EOPNOTSUPP;
}

static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index,
			     struct devlink_sb_pool_info *pool_info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
}

static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index, u32 size,
			     enum devlink_sb_threshold_type threshold_type,
			     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
				  threshold_type, extack);
}

static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
				       p_threshold);
}

static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 threshold, struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
				       threshold, extack);
}

static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 *p_pool_index, u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
					  pool_type, p_pool_index,
					  p_threshold);
}

static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 pool_index, u32 threshold,
				     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
					  pool_type, pool_index, threshold,
					  extack);
}

static int felix_sb_occ_snapshot(struct dsa_switch *ds,
				 unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_snapshot(ocelot, sb_index);
}

static int felix_sb_occ_max_clear(struct dsa_switch *ds,
				  unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_max_clear(ocelot, sb_index);
}

static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
				      unsigned int sb_index, u16 pool_index,
				      u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
					   p_cur, p_max);
}

static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
					 unsigned int sb_index, u16 tc_index,
					 enum devlink_sb_pool_type pool_type,
					 u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index,
					      pool_type, p_cur, p_max);
}

static int felix_mrp_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_add(ocelot, port, mrp);
}

static int felix_mrp_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_del(ocelot, port, mrp);
}

static int
felix_mrp_add_ring_role(struct dsa_switch *ds, int port,
			const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_add_ring_role(ocelot, port, mrp);
}

static int
felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
			const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}

const struct dsa_switch_ops felix_switch_ops = {
	.get_tag_protocol		= felix_get_tag_protocol,
	.change_tag_protocol		= felix_change_tag_protocol,
	.setup				= felix_setup,
	.teardown			= felix_teardown,
	.set_ageing_time		= felix_set_ageing_time,
	.get_strings			= felix_get_strings,
	.get_ethtool_stats		= felix_get_ethtool_stats,
	.get_sset_count			= felix_get_sset_count,
	.get_ts_info			= felix_get_ts_info,
	.phylink_validate		= felix_phylink_validate,
	.phylink_mac_config		= felix_phylink_mac_config,
	.phylink_mac_link_down		= felix_phylink_mac_link_down,
	.phylink_mac_link_up		= felix_phylink_mac_link_up,
	.port_enable			= felix_port_enable,
	.port_disable			= felix_port_disable,
	.port_fdb_dump			= felix_fdb_dump,
	.port_fdb_add			= felix_fdb_add,
	.port_fdb_del			= felix_fdb_del,
	.port_mdb_add			= felix_mdb_add,
	.port_mdb_del			= felix_mdb_del,
	.port_pre_bridge_flags		= felix_pre_bridge_flags,
	.port_bridge_flags		= felix_bridge_flags,
	.port_bridge_join		= felix_bridge_join,
	.port_bridge_leave		= felix_bridge_leave,
	.port_lag_join			= felix_lag_join,
	.port_lag_leave			= felix_lag_leave,
	.port_lag_change		= felix_lag_change,
	.port_stp_state_set		= felix_bridge_stp_state_set,
	.port_vlan_filtering		= felix_vlan_filtering,
	.port_vlan_add			= felix_vlan_add,
	.port_vlan_del			= felix_vlan_del,
	.port_hwtstamp_get		= felix_hwtstamp_get,
	.port_hwtstamp_set		= felix_hwtstamp_set,
	.port_rxtstamp			= felix_rxtstamp,
	.port_txtstamp			= felix_txtstamp,
	.port_change_mtu		= felix_change_mtu,
	.port_max_mtu			= felix_get_max_mtu,
	.port_policer_add		= felix_port_policer_add,
	.port_policer_del		= felix_port_policer_del,
	.cls_flower_add			= felix_cls_flower_add,
	.cls_flower_del			= felix_cls_flower_del,
	.cls_flower_stats		= felix_cls_flower_stats,
	.port_setup_tc			= felix_port_setup_tc,
	.devlink_sb_pool_get		= felix_sb_pool_get,
	.devlink_sb_pool_set		= felix_sb_pool_set,
	.devlink_sb_port_pool_get	= felix_sb_port_pool_get,
	.devlink_sb_port_pool_set	= felix_sb_port_pool_set,
	.devlink_sb_tc_pool_bind_get	= felix_sb_tc_pool_bind_get,
	.devlink_sb_tc_pool_bind_set	= felix_sb_tc_pool_bind_set,
	.devlink_sb_occ_snapshot	= felix_sb_occ_snapshot,
	.devlink_sb_occ_max_clear	= felix_sb_occ_max_clear,
	.devlink_sb_occ_port_pool_get	= felix_sb_occ_port_pool_get,
	.devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get,
	.port_mrp_add			= felix_mrp_add,
	.port_mrp_del			= felix_mrp_del,
	.port_mrp_add_ring_role		= felix_mrp_add_ring_role,
	.port_mrp_del_ring_role		= felix_mrp_del_ring_role,
	.tag_8021q_vlan_add		= felix_tag_8021q_vlan_add,
	.tag_8021q_vlan_del		= felix_tag_8021q_vlan_del,
};

struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_switch *ds = felix->ds;

	if (!dsa_is_user_port(ds, port))
		return NULL;

	return dsa_to_port(ds, port)->slave;
}

int felix_netdev_to_port(struct net_device *dev)
{
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(dev);
	if (IS_ERR(dp))
		return -EINVAL;

	return dp->index;
}