// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}

static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct switchdev_notifier_fdb_info info = {
		/* flush all VLANs */
		.vid = 0,
	};

	/* When the port becomes standalone it has already left the bridge.
	 * Don't notify the bridge in that case.
	 */
	if (!brport_dev)
		return;

	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
				 brport_dev, &info.info, NULL);
}

static void dsa_port_fast_age(const struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_fast_age)
		return;

	ds->ops->port_fast_age(ds, dp->index);

	dsa_port_notify_bridge_fdb_flush(dp);
}

static bool dsa_port_can_configure_learning(struct dsa_port *dp)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_LEARNING,
	};
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
		return false;

	err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
	return !err;
}

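/**
 * dsa_port_set_state - Set the STP state of a port
 * @dp: port to configure
 * @state: STP state, one of the BR_STATE_* values
 * @do_fast_age: whether to flush dynamic FDB entries if the port leaves the
 *	Learning or Forwarding state with address learning enabled
 *
 * Program the STP state into the switch and, where needed, fast age the
 * port's forwarding database. Returns 0, or -EOPNOTSUPP if the driver does
 * not implement port_stp_state_set.
 */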
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (!dsa_port_can_configure_learning(dp) ||
	    (do_fast_age && dp->learning)) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 * Ports that were standalone before the STP state change don't
		 * need to fast age the FDB, since address learning is off in
		 * standalone mode.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			dsa_port_fast_age(dp);
	}

	dp->stp_state = state;

	return 0;
}

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
				   bool do_fast_age)
{
	int err;

	err = dsa_port_set_state(dp, state, do_fast_age);
	if (err)
		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}

int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}

static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}

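/* Replay the bridge configuration (bridge port flags, STP state, VLAN
 * filtering, ageing time) on a port that has just joined a bridge, so that
 * the hardware state matches what the bridge layer already believes.
 * -EOPNOTSUPP from the driver is tolerated for the individual attributes.
 */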
static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dp->bridge_dev;
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. But we even prefer it that way, because
	 * otherwise, some setups might never get the notification they need,
	 * for example, when a port leaves a LAG that offloads the bridge,
	 * it becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
	 * so allow it to be in BR_STATE_FORWARDING to be kept functional
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

	/* VLAN filtering is handled by dsa_switch_bridge_leave */

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}

static void dsa_port_bridge_tx_fwd_unoffload(struct dsa_port *dp,
					     struct net_device *bridge_dev)
{
	int bridge_num = dp->bridge_num;
	struct dsa_switch *ds = dp->ds;

	/* No bridge TX forwarding offload => do nothing */
	if (!ds->ops->port_bridge_tx_fwd_unoffload || dp->bridge_num == -1)
		return;

	dp->bridge_num = -1;

	dsa_bridge_num_put(bridge_dev, bridge_num);

	/* Notify the chips only once the offload has been deactivated, so
	 * that they can update their configuration accordingly.
	 */
	ds->ops->port_bridge_tx_fwd_unoffload(ds, dp->index, bridge_dev,
					      bridge_num);
}

static bool dsa_port_bridge_tx_fwd_offload(struct dsa_port *dp,
					   struct net_device *bridge_dev)
{
	struct dsa_switch *ds = dp->ds;
	int bridge_num, err;

	if (!ds->ops->port_bridge_tx_fwd_offload)
		return false;

	bridge_num = dsa_bridge_num_get(bridge_dev,
					ds->num_fwd_offloading_bridges);
	if (bridge_num < 0)
		return false;

	dp->bridge_num = bridge_num;

	/* Notify the driver */
	err = ds->ops->port_bridge_tx_fwd_offload(ds, dp->index, bridge_dev,
						  bridge_num);
	if (err) {
		dsa_port_bridge_tx_fwd_unoffload(dp, bridge_dev);
		return false;
	}

	return true;
}

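/**
 * dsa_port_bridge_join - Offload bridging on a port
 * @dp: port joining the bridge
 * @br: bridge net device
 * @extack: netlink extended ack
 *
 * Called once the port is already bridged at the software level: notify the
 * fabric, set up TX forwarding offload if the driver supports it, register
 * the bridge port with switchdev and replay the bridge attributes, unwinding
 * every step on failure.
 */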
337 */ 338 dp->bridge_dev = br; 339 340 brport_dev = dsa_port_to_bridge_port(dp); 341 342 err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info); 343 if (err) 344 goto out_rollback; 345 346 tx_fwd_offload = dsa_port_bridge_tx_fwd_offload(dp, br); 347 348 err = switchdev_bridge_port_offload(brport_dev, dev, dp, 349 &dsa_slave_switchdev_notifier, 350 &dsa_slave_switchdev_blocking_notifier, 351 tx_fwd_offload, extack); 352 if (err) 353 goto out_rollback_unbridge; 354 355 err = dsa_port_switchdev_sync_attrs(dp, extack); 356 if (err) 357 goto out_rollback_unoffload; 358 359 return 0; 360 361 out_rollback_unoffload: 362 switchdev_bridge_port_unoffload(brport_dev, dp, 363 &dsa_slave_switchdev_notifier, 364 &dsa_slave_switchdev_blocking_notifier); 365 out_rollback_unbridge: 366 dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info); 367 out_rollback: 368 dp->bridge_dev = NULL; 369 return err; 370 } 371 372 void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br) 373 { 374 struct net_device *brport_dev = dsa_port_to_bridge_port(dp); 375 376 /* Don't try to unoffload something that is not offloaded */ 377 if (!brport_dev) 378 return; 379 380 switchdev_bridge_port_unoffload(brport_dev, dp, 381 &dsa_slave_switchdev_notifier, 382 &dsa_slave_switchdev_blocking_notifier); 383 } 384 385 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br) 386 { 387 struct dsa_notifier_bridge_info info = { 388 .tree_index = dp->ds->dst->index, 389 .sw_index = dp->ds->index, 390 .port = dp->index, 391 .br = br, 392 }; 393 int err; 394 395 /* Here the port is already unbridged. Reflect the current configuration 396 * so that drivers can program their chips accordingly. 397 */ 398 dp->bridge_dev = NULL; 399 400 dsa_port_bridge_tx_fwd_unoffload(dp, br); 401 402 err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info); 403 if (err) 404 dev_err(dp->ds->dev, 405 "port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n", 406 dp->index, ERR_PTR(err)); 407 408 dsa_port_switchdev_unsync_attrs(dp); 409 } 410 411 int dsa_port_lag_change(struct dsa_port *dp, 412 struct netdev_lag_lower_state_info *linfo) 413 { 414 struct dsa_notifier_lag_info info = { 415 .sw_index = dp->ds->index, 416 .port = dp->index, 417 }; 418 bool tx_enabled; 419 420 if (!dp->lag_dev) 421 return 0; 422 423 /* On statically configured aggregates (e.g. loadbalance 424 * without LACP) ports will always be tx_enabled, even if the 425 * link is down. Thus we require both link_up and tx_enabled 426 * in order to include it in the tx set. 
427 */ 428 tx_enabled = linfo->link_up && linfo->tx_enabled; 429 430 if (tx_enabled == dp->lag_tx_enabled) 431 return 0; 432 433 dp->lag_tx_enabled = tx_enabled; 434 435 return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info); 436 } 437 438 int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag, 439 struct netdev_lag_upper_info *uinfo, 440 struct netlink_ext_ack *extack) 441 { 442 struct dsa_notifier_lag_info info = { 443 .sw_index = dp->ds->index, 444 .port = dp->index, 445 .lag = lag, 446 .info = uinfo, 447 }; 448 struct net_device *bridge_dev; 449 int err; 450 451 dsa_lag_map(dp->ds->dst, lag); 452 dp->lag_dev = lag; 453 454 err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info); 455 if (err) 456 goto err_lag_join; 457 458 bridge_dev = netdev_master_upper_dev_get(lag); 459 if (!bridge_dev || !netif_is_bridge_master(bridge_dev)) 460 return 0; 461 462 err = dsa_port_bridge_join(dp, bridge_dev, extack); 463 if (err) 464 goto err_bridge_join; 465 466 return 0; 467 468 err_bridge_join: 469 dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info); 470 err_lag_join: 471 dp->lag_dev = NULL; 472 dsa_lag_unmap(dp->ds->dst, lag); 473 return err; 474 } 475 476 void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag) 477 { 478 if (dp->bridge_dev) 479 dsa_port_pre_bridge_leave(dp, dp->bridge_dev); 480 } 481 482 void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag) 483 { 484 struct dsa_notifier_lag_info info = { 485 .sw_index = dp->ds->index, 486 .port = dp->index, 487 .lag = lag, 488 }; 489 int err; 490 491 if (!dp->lag_dev) 492 return; 493 494 /* Port might have been part of a LAG that in turn was 495 * attached to a bridge. 496 */ 497 if (dp->bridge_dev) 498 dsa_port_bridge_leave(dp, dp->bridge_dev); 499 500 dp->lag_tx_enabled = false; 501 dp->lag_dev = NULL; 502 503 err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info); 504 if (err) 505 dev_err(dp->ds->dev, 506 "port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n", 507 dp->index, ERR_PTR(err)); 508 509 dsa_lag_unmap(dp->ds->dst, lag); 510 } 511 512 /* Must be called under rcu_read_lock() */ 513 static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp, 514 bool vlan_filtering, 515 struct netlink_ext_ack *extack) 516 { 517 struct dsa_switch *ds = dp->ds; 518 int err, i; 519 520 /* VLAN awareness was off, so the question is "can we turn it on". 521 * We may have had 8021q uppers, those need to go. Make sure we don't 522 * enter an inconsistent state: deny changing the VLAN awareness state 523 * as long as we have 8021q uppers. 524 */ 525 if (vlan_filtering && dsa_is_user_port(ds, dp->index)) { 526 struct net_device *upper_dev, *slave = dp->slave; 527 struct net_device *br = dp->bridge_dev; 528 struct list_head *iter; 529 530 netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) { 531 struct bridge_vlan_info br_info; 532 u16 vid; 533 534 if (!is_vlan_dev(upper_dev)) 535 continue; 536 537 vid = vlan_dev_vlan_id(upper_dev); 538 539 /* br_vlan_get_info() returns -EINVAL or -ENOENT if the 540 * device, respectively the VID is not found, returning 541 * 0 means success, which is a failure for us here. 
542 */ 543 err = br_vlan_get_info(br, vid, &br_info); 544 if (err == 0) { 545 NL_SET_ERR_MSG_MOD(extack, 546 "Must first remove VLAN uppers having VIDs also present in bridge"); 547 return false; 548 } 549 } 550 } 551 552 if (!ds->vlan_filtering_is_global) 553 return true; 554 555 /* For cases where enabling/disabling VLAN awareness is global to the 556 * switch, we need to handle the case where multiple bridges span 557 * different ports of the same switch device and one of them has a 558 * different setting than what is being requested. 559 */ 560 for (i = 0; i < ds->num_ports; i++) { 561 struct net_device *other_bridge; 562 563 other_bridge = dsa_to_port(ds, i)->bridge_dev; 564 if (!other_bridge) 565 continue; 566 /* If it's the same bridge, it also has same 567 * vlan_filtering setting => no need to check 568 */ 569 if (other_bridge == dp->bridge_dev) 570 continue; 571 if (br_vlan_enabled(other_bridge) != vlan_filtering) { 572 NL_SET_ERR_MSG_MOD(extack, 573 "VLAN filtering is a global setting"); 574 return false; 575 } 576 } 577 return true; 578 } 579 580 int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering, 581 struct netlink_ext_ack *extack) 582 { 583 bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp); 584 struct dsa_switch *ds = dp->ds; 585 bool apply; 586 int err; 587 588 if (!ds->ops->port_vlan_filtering) 589 return -EOPNOTSUPP; 590 591 /* We are called from dsa_slave_switchdev_blocking_event(), 592 * which is not under rcu_read_lock(), unlike 593 * dsa_slave_switchdev_event(). 594 */ 595 rcu_read_lock(); 596 apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack); 597 rcu_read_unlock(); 598 if (!apply) 599 return -EINVAL; 600 601 if (dsa_port_is_vlan_filtering(dp) == vlan_filtering) 602 return 0; 603 604 err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering, 605 extack); 606 if (err) 607 return err; 608 609 if (ds->vlan_filtering_is_global) { 610 int port; 611 612 ds->vlan_filtering = vlan_filtering; 613 614 for (port = 0; port < ds->num_ports; port++) { 615 struct net_device *slave; 616 617 if (!dsa_is_user_port(ds, port)) 618 continue; 619 620 /* We might be called in the unbind path, so not 621 * all slave devices might still be registered. 
622 */ 623 slave = dsa_to_port(ds, port)->slave; 624 if (!slave) 625 continue; 626 627 err = dsa_slave_manage_vlan_filtering(slave, 628 vlan_filtering); 629 if (err) 630 goto restore; 631 } 632 } else { 633 dp->vlan_filtering = vlan_filtering; 634 635 err = dsa_slave_manage_vlan_filtering(dp->slave, 636 vlan_filtering); 637 if (err) 638 goto restore; 639 } 640 641 return 0; 642 643 restore: 644 ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL); 645 646 if (ds->vlan_filtering_is_global) 647 ds->vlan_filtering = old_vlan_filtering; 648 else 649 dp->vlan_filtering = old_vlan_filtering; 650 651 return err; 652 } 653 654 /* This enforces legacy behavior for switch drivers which assume they can't 655 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0 656 */ 657 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp) 658 { 659 struct dsa_switch *ds = dp->ds; 660 661 if (!dp->bridge_dev) 662 return false; 663 664 return (!ds->configure_vlan_while_not_filtering && 665 !br_vlan_enabled(dp->bridge_dev)); 666 } 667 668 int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock) 669 { 670 unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock); 671 unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies); 672 struct dsa_notifier_ageing_time_info info; 673 int err; 674 675 info.ageing_time = ageing_time; 676 677 err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info); 678 if (err) 679 return err; 680 681 dp->ageing_time = ageing_time; 682 683 return 0; 684 } 685 686 int dsa_port_pre_bridge_flags(const struct dsa_port *dp, 687 struct switchdev_brport_flags flags, 688 struct netlink_ext_ack *extack) 689 { 690 struct dsa_switch *ds = dp->ds; 691 692 if (!ds->ops->port_pre_bridge_flags) 693 return -EINVAL; 694 695 return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack); 696 } 697 698 int dsa_port_bridge_flags(struct dsa_port *dp, 699 struct switchdev_brport_flags flags, 700 struct netlink_ext_ack *extack) 701 { 702 struct dsa_switch *ds = dp->ds; 703 int err; 704 705 if (!ds->ops->port_bridge_flags) 706 return -EOPNOTSUPP; 707 708 err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack); 709 if (err) 710 return err; 711 712 if (flags.mask & BR_LEARNING) { 713 bool learning = flags.val & BR_LEARNING; 714 715 if (learning == dp->learning) 716 return 0; 717 718 if ((dp->learning && !learning) && 719 (dp->stp_state == BR_STATE_LEARNING || 720 dp->stp_state == BR_STATE_FORWARDING)) 721 dsa_port_fast_age(dp); 722 723 dp->learning = learning; 724 } 725 726 return 0; 727 } 728 729 int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu, 730 bool targeted_match) 731 { 732 struct dsa_notifier_mtu_info info = { 733 .sw_index = dp->ds->index, 734 .targeted_match = targeted_match, 735 .port = dp->index, 736 .mtu = new_mtu, 737 }; 738 739 return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info); 740 } 741 742 int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr, 743 u16 vid) 744 { 745 struct dsa_notifier_fdb_info info = { 746 .sw_index = dp->ds->index, 747 .port = dp->index, 748 .addr = addr, 749 .vid = vid, 750 }; 751 752 return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info); 753 } 754 755 int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr, 756 u16 vid) 757 { 758 struct dsa_notifier_fdb_info info = { 759 .sw_index = dp->ds->index, 760 .port = dp->index, 761 .addr = addr, 762 .vid = vid, 763 764 }; 765 766 return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info); 767 } 768 769 int 
int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_uc_add(cpu_dp->master, addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_uc_del(cpu_dp->master, addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

int dsa_port_host_mdb_add(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_mc_add(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

int dsa_port_host_mdb_del(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_mc_del(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_notifier_mrp_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD, &info);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_notifier_mrp_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL, &info);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_notifier_mrp_ring_role_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD_RING_ROLE, &info);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_notifier_mrp_ring_role_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL_RING_ROLE, &info);
}

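/**
 * dsa_port_set_tag_protocol - Set the tagging protocol of a CPU port
 * @cpu_dp: CPU port to configure
 * @tag_ops: tagging protocol operations
 *
 * The rcv() hook is cached directly on the port, saving one indirection in
 * the RX hotpath.
 */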
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate)
		return;

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

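/* When the driver has no phylink MAC callbacks, the link_down/link_up hooks
 * fall back to the legacy adjust_link callback, provided a PHY is attached.
 */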
static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_is_user_port(ds, dp->index))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};

static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(port_dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;
	dp->pl_config.pcs_poll = ds->pcs_poll;

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}

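/**
 * dsa_port_link_register_of - Set up link management for a CPU or DSA port
 * @dp: port described in the device tree
 *
 * If the driver has no legacy adjust_link callback, use phylink whenever the
 * port has a fixed-link or phy-handle description; drivers still using
 * adjust_link are warned and handled through the deprecated PHYLIB path.
 */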
int dsa_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_np;
	int port = dp->index;

	if (!ds->ops->adjust_link) {
		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
			return dsa_port_phylink_register(dp);
		}
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_port_fixed_link_register_of(dp);
	else
		return dsa_port_setup_phy_of(dp, true);
}

void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}

int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_strings(phydev, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);

int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_stats(phydev, NULL, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);

int dsa_port_get_phy_sset_count(struct dsa_port *dp)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_sset_count(phydev);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);

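/* HSR/PRP offload: record the HSR device on the port and notify the switch;
 * on failure, dp->hsr_dev is reset and the error is propagated to the
 * caller.
 */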
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.hsr = hsr,
	};
	int err;

	dp->hsr_dev = hsr;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
	if (err)
		dp->hsr_dev = NULL;

	return err;
}

void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.hsr = hsr,
	};
	int err;

	dp->hsr_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_HSR_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));
}

int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vid = vid,
	};

	if (broadcast)
		return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);

	return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
}

void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vid = vid,
	};
	int err;

	if (broadcast)
		err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	else
		err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify tag_8021q VLAN %d deletion: %pe\n",
			dp->index, vid, ERR_PTR(err));
}