// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}

static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp, u16 vid)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct switchdev_notifier_fdb_info info = {
		.vid = vid,
	};

	/* When the port becomes standalone it has already left the bridge.
	 * Don't notify the bridge in that case.
	 */
	if (!brport_dev)
		return;

	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
				 brport_dev, &info.info, NULL);
}

static void dsa_port_fast_age(const struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_fast_age)
		return;

	ds->ops->port_fast_age(ds, dp->index);

	/* flush all VLANs */
	dsa_port_notify_bridge_fdb_flush(dp, 0);
}

static int dsa_port_vlan_fast_age(const struct dsa_port *dp, u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_vlan_fast_age)
		return -EOPNOTSUPP;

	err = ds->ops->port_vlan_fast_age(ds, dp->index, vid);

	if (!err)
		dsa_port_notify_bridge_fdb_flush(dp, vid);

	return err;
}

static int dsa_port_msti_fast_age(const struct dsa_port *dp, u16 msti)
{
	DECLARE_BITMAP(vids, VLAN_N_VID) = { 0 };
	int err, vid;

	err = br_mst_get_info(dsa_port_bridge_dev_get(dp), msti, vids);
	if (err)
		return err;

	for_each_set_bit(vid, vids, VLAN_N_VID) {
		err = dsa_port_vlan_fast_age(dp, vid);
		if (err)
			return err;
	}

	return 0;
}

static bool dsa_port_can_configure_learning(struct dsa_port *dp)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_LEARNING,
	};
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
		return false;

	err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
	return !err;
}
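
/* Illustrative sketch (not part of the original file): the probe above does a
 * dry run of ->port_pre_bridge_flags() with only BR_LEARNING in the mask. A
 * driver for a hypothetical "foo" switch that can toggle learning and unknown
 * unicast flooding, but nothing else, might implement the callback roughly as:
 *
 *	static int foo_port_pre_bridge_flags(struct dsa_switch *ds, int port,
 *					     struct switchdev_brport_flags flags,
 *					     struct netlink_ext_ack *extack)
 *	{
 *		if (flags.mask & ~(BR_LEARNING | BR_FLOOD))
 *			return -EINVAL;
 *
 *		return 0;
 *	}
 *
 * Returning 0 for a mask of BR_LEARNING is what makes
 * dsa_port_can_configure_learning() report true for this port.
 */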

int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (!dsa_port_can_configure_learning(dp) ||
	    (do_fast_age && dp->learning)) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 * Ports that were standalone before the STP state change don't
		 * need to fast age the FDB, since address learning is off in
		 * standalone mode.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			dsa_port_fast_age(dp);
	}

	dp->stp_state = state;

	return 0;
}

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
				   bool do_fast_age)
{
	int err;

	err = dsa_port_set_state(dp, state, do_fast_age);
	if (err)
		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}
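
/* Illustrative sketch (hypothetical "foo" driver, not part of this file):
 * ->port_stp_state_set() returns void, so the driver is expected to always
 * succeed in programming the state, typically by writing a per-port register:
 *
 *	static void foo_port_stp_state_set(struct dsa_switch *ds, int port,
 *					   u8 state)
 *	{
 *		u32 val;
 *
 *		switch (state) {
 *		case BR_STATE_DISABLED:
 *			val = FOO_PORT_STATE_DISABLED;
 *			break;
 *		case BR_STATE_BLOCKING:
 *		case BR_STATE_LISTENING:
 *			val = FOO_PORT_STATE_BLOCKING;
 *			break;
 *		case BR_STATE_LEARNING:
 *			val = FOO_PORT_STATE_LEARNING;
 *			break;
 *		default:
 *			val = FOO_PORT_STATE_FORWARDING;
 *			break;
 *		}
 *
 *		foo_write_port_reg(ds, port, FOO_PORT_STATE, val);
 *	}
 *
 * FOO_PORT_STATE_* and foo_write_port_reg() are made-up names; flushing stale
 * FDB entries on Forwarding -> Blocking transitions is handled generically by
 * dsa_port_set_state() above, not by the driver.
 */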

int dsa_port_set_mst_state(struct dsa_port *dp,
			   const struct switchdev_mst_state *state,
			   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	u8 prev_state;
	int err;

	if (!ds->ops->port_mst_state_set)
		return -EOPNOTSUPP;

	err = br_mst_get_state(dsa_port_to_bridge_port(dp), state->msti,
			       &prev_state);
	if (err)
		return err;

	err = ds->ops->port_mst_state_set(ds, dp->index, state);
	if (err)
		return err;

	if (!(dp->learning &&
	      (prev_state == BR_STATE_LEARNING ||
	       prev_state == BR_STATE_FORWARDING) &&
	      (state->state == BR_STATE_DISABLED ||
	       state->state == BR_STATE_BLOCKING ||
	       state->state == BR_STATE_LISTENING)))
		return 0;

	err = dsa_port_msti_fast_age(dp, state->msti);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "Unable to flush associated VLANs");

	return 0;
}

int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
					  struct dsa_bridge bridge)
{
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	bool vlan_filtering;
	int err;

	if (ds->needs_standalone_vlan_filtering &&
	    !br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing vlan_filtering setting upon slave ports leaving
	 * it. That is a good thing, because that lets us handle it and also
	 * handle the case where the switch's vlan_filtering setting is global
	 * (not per port). When that happens, the correct moment to trigger the
	 * vlan_filtering callback is only when the last port leaves the last
	 * VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(other_dp, ds) {
			struct net_device *br = dsa_port_bridge_dev_get(other_dp);

			if (br && br_vlan_enabled(br)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (!change_vlan_filtering)
		return;

	err = dsa_port_vlan_filtering(dp, vlan_filtering, &extack);
	if (extack._msg) {
		dev_err(ds->dev, "port %d: %s\n", dp->index,
			extack._msg);
	}
	if (err && err != -EOPNOTSUPP) {
		dev_err(ds->dev,
			"port %d failed to reset VLAN filtering to %d: %pe\n",
			dp->index, vlan_filtering, ERR_PTR(err));
	}
}
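
/* Illustrative sketch (hypothetical "foo" driver, not part of this file): the
 * behavior of the helpers above and below is steered by policy bits a driver
 * sets in its ->setup() callback, for example:
 *
 *	static int foo_setup(struct dsa_switch *ds)
 *	{
 *		// One VLAN awareness setting for the whole chip, not per port
 *		ds->vlan_filtering_is_global = true;
 *
 *		// VLAN table can be programmed even while not filtering
 *		ds->configure_vlan_while_not_filtering = true;
 *
 *		return 0;
 *	}
 *
 * With vlan_filtering_is_global set, dsa_port_reset_vlan_filtering() only
 * restores the standalone setting once the last port has left the last
 * VLAN-aware bridge.
 */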

static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}

static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD | BR_PORT_LOCKED;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}

static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp,
					    struct dsa_bridge bridge)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. But we even prefer it that way, because
	 * otherwise, some setups might never get the notification they need,
	 * for example, when a port leaves a LAG that offloads the bridge,
	 * it becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
	 * so allow it to be in BR_STATE_FORWARDING to be kept functional
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

	dsa_port_reset_vlan_filtering(dp, bridge);

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}

static int dsa_port_bridge_create(struct dsa_port *dp,
				  struct net_device *br,
				  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_bridge *bridge;

	bridge = dsa_tree_bridge_find(ds->dst, br);
	if (bridge) {
		refcount_inc(&bridge->refcount);
		dp->bridge = bridge;
		return 0;
	}

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return -ENOMEM;

	refcount_set(&bridge->refcount, 1);

	bridge->dev = br;

	bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
	if (ds->max_num_bridges && !bridge->num) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Range of offloadable bridges exceeded");
		kfree(bridge);
		return -EOPNOTSUPP;
	}

	dp->bridge = bridge;

	return 0;
}

static void dsa_port_bridge_destroy(struct dsa_port *dp,
				    const struct net_device *br)
{
	struct dsa_bridge *bridge = dp->bridge;

	dp->bridge = NULL;

	if (!refcount_dec_and_test(&bridge->refcount))
		return;

	if (bridge->num)
		dsa_bridge_num_put(br, bridge->num);

	kfree(bridge);
}

static bool dsa_port_supports_mst(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	return ds->ops->vlan_msti_set &&
		ds->ops->port_mst_state_set &&
		ds->ops->port_vlan_fast_age &&
		dsa_port_can_configure_learning(dp);
}
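
/* Illustrative sketch (not part of this file): for the check above to pass, a
 * driver must wire up all three MST-related operations in its dsa_switch_ops,
 * in addition to supporting configurable learning, e.g. for a hypothetical
 * "foo" driver:
 *
 *	static const struct dsa_switch_ops foo_switch_ops = {
 *		...
 *		.vlan_msti_set		= foo_vlan_msti_set,
 *		.port_mst_state_set	= foo_port_mst_state_set,
 *		.port_vlan_fast_age	= foo_port_vlan_fast_age,
 *		.port_bridge_flags	= foo_port_bridge_flags,
 *		.port_pre_bridge_flags	= foo_port_pre_bridge_flags,
 *		...
 *	};
 */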

int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
		.extack = extack,
	};
	struct net_device *dev = dp->slave;
	struct net_device *brport_dev;
	int err;

	if (br_mst_enabled(br) && !dsa_port_supports_mst(dp))
		return -EOPNOTSUPP;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	err = dsa_port_bridge_create(dp, br, extack);
	if (err)
		return err;

	brport_dev = dsa_port_to_bridge_port(dp);

	info.bridge = *dp->bridge;
	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	/* Drivers which support bridge TX forwarding should set this */
	dp->bridge->tx_fwd_offload = info.tx_fwd_offload;

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_slave_switchdev_notifier,
					    &dsa_slave_switchdev_blocking_notifier,
					    dp->bridge->tx_fwd_offload, extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
	dsa_flush_workqueue();
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dsa_port_bridge_destroy(dp, br);
	return err;
}

void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

	/* Don't try to unoffload something that is not offloaded */
	if (!brport_dev)
		return;

	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);

	dsa_flush_workqueue();
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
	};
	int err;

	/* If the port could not be offloaded to begin with, then
	 * there is nothing to do.
	 */
	if (!dp->bridge)
		return;

	info.bridge = *dp->bridge;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dsa_port_bridge_destroy(dp, br);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));

	dsa_port_switchdev_unsync_attrs(dp, info.bridge);
}

int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	bool tx_enabled;

	if (!dp->lag)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}
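
/* Worked example for the tx_enabled logic above (illustrative): on a
 * statically configured aggregate, a lower whose carrier is down still
 * reports tx_enabled = true, so link_up is what distinguishes usable ports:
 *
 *	link_up	tx_enabled (from linfo)	included in tx set
 *	   0	    1			no  (static LAG, carrier down)
 *	   1	    0			no  (e.g. LACP port not yet aggregated)
 *	   1	    1			yes
 */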

static int dsa_port_lag_create(struct dsa_port *dp,
			       struct net_device *lag_dev)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_lag *lag;

	lag = dsa_tree_lag_find(ds->dst, lag_dev);
	if (lag) {
		refcount_inc(&lag->refcount);
		dp->lag = lag;
		return 0;
	}

	lag = kzalloc(sizeof(*lag), GFP_KERNEL);
	if (!lag)
		return -ENOMEM;

	refcount_set(&lag->refcount, 1);
	mutex_init(&lag->fdb_lock);
	INIT_LIST_HEAD(&lag->fdbs);
	lag->dev = lag_dev;
	dsa_lag_map(ds->dst, lag);
	dp->lag = lag;

	return 0;
}

static void dsa_port_lag_destroy(struct dsa_port *dp)
{
	struct dsa_lag *lag = dp->lag;

	dp->lag = NULL;
	dp->lag_tx_enabled = false;

	if (!refcount_dec_and_test(&lag->refcount))
		return;

	WARN_ON(!list_empty(&lag->fdbs));
	dsa_lag_unmap(dp->ds->dst, lag);
	kfree(lag);
}

int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.dp = dp,
		.info = uinfo,
	};
	struct net_device *bridge_dev;
	int err;

	err = dsa_port_lag_create(dp, lag_dev);
	if (err)
		goto err_lag_create;

	info.lag = *dp->lag;
	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	bridge_dev = netdev_master_upper_dev_get(lag_dev);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dsa_port_lag_destroy(dp);
err_lag_create:
	return err;
}

void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);

	if (br)
		dsa_port_pre_bridge_leave(dp, br);
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_notifier_lag_info info = {
		.dp = dp,
	};
	int err;

	if (!dp->lag)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (br)
		dsa_port_bridge_leave(dp, br);

	info.lag = *dp->lag;

	dsa_port_lag_destroy(dp);

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
			dp->index, ERR_PTR(err));
}

/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int err;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_port_is_user(dp)) {
		struct net_device *br = dsa_port_bridge_dev_get(dp);
		struct net_device *upper_dev, *slave = dp->slave;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device, respectively the VID, is not found;
			 * returning 0 means success, which is a failure for us
			 * here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);

		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
			continue;

		if (br_vlan_enabled(other_br) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}
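
/* Illustrative scenario for the global check above (not part of this file):
 * with vlan_filtering_is_global set by the driver, this sequence is rejected,
 * because swp0 and swp1 would need conflicting VLAN awareness settings on the
 * same chip:
 *
 *	ip link add br0 type bridge vlan_filtering 1
 *	ip link add br1 type bridge vlan_filtering 0
 *	ip link set swp0 master br0
 *	ip link set swp1 master br1
 *		# fails with extack "VLAN filtering is a global setting"
 */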

int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global) {
		struct dsa_port *other_dp;

		ds->vlan_filtering = vlan_filtering;

		dsa_switch_for_each_user_port(other_dp, ds) {
			struct net_device *slave = other_dp->slave;

			/* We might be called in the unbind path, so not
			 * all slave devices might still be registered.
			 */
			if (!slave)
				continue;

			err = dsa_slave_manage_vlan_filtering(slave,
							      vlan_filtering);
			if (err)
				goto restore;
		}
	} else {
		dp->vlan_filtering = vlan_filtering;

		err = dsa_slave_manage_vlan_filtering(dp->slave,
						      vlan_filtering);
		if (err)
			goto restore;
	}

	return 0;

restore:
	ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = old_vlan_filtering;
	else
		dp->vlan_filtering = old_vlan_filtering;

	return err;
}

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_switch *ds = dp->ds;

	if (!br)
		return false;

	return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
}

int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;

	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;

	dp->ageing_time = ageing_time;

	return 0;
}
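
/* Worked example (illustrative): switchdev expresses ageing time in clock_t
 * units of 1/USER_HZ seconds (USER_HZ is typically 100). The bridge default
 * of 300 seconds thus arrives as ageing_clock = 30000, which
 * clock_t_to_jiffies() turns into 300 * HZ jiffies and jiffies_to_msecs()
 * into the 300000 ms that dp->ageing_time stores and drivers receive.
 */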

int dsa_port_mst_enable(struct dsa_port *dp, bool on,
			struct netlink_ext_ack *extack)
{
	if (on && !dsa_port_supports_mst(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "Hardware does not support MST");
		return -EINVAL;
	}

	return 0;
}

int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_bridge_flags)
		return -EOPNOTSUPP;

	err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
	if (err)
		return err;

	if (flags.mask & BR_LEARNING) {
		bool learning = flags.val & BR_LEARNING;

		if (learning == dp->learning)
			return 0;

		if ((dp->learning && !learning) &&
		    (dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING))
			dsa_port_fast_age(dp);

		dp->learning = learning;
	}

	return 0;
}

void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc)
{
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->port_set_host_flood)
		ds->ops->port_set_host_flood(ds, dp->index, uc, mc);
}

int dsa_port_vlan_msti(struct dsa_port *dp,
		       const struct switchdev_vlan_msti *msti)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->vlan_msti_set)
		return -EOPNOTSUPP;

	return ds->ops->vlan_msti_set(ds, *dp->bridge, msti);
}

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu)
{
	struct dsa_notifier_mtu_info info = {
		.dp = dp,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	/* Refcounting takes bridge.num as a key, and should be global for all
	 * bridges in the absence of FDB isolation, and per bridge otherwise.
	 * Force the bridge.num to zero here in the absence of FDB isolation.
	 */
	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}
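
/* Illustrative example for the bridge.num keying above (not part of this
 * file): suppose swp0 is in br0 and swp1 is in br1, and the driver set
 * ds->max_num_bridges so that the bridges were assigned num 1 and num 2.
 * If both bridges learn the same {MAC, VID} pair and ds->fdb_isolation is
 * set, the two entries keep distinct bridge.num keys and are refcounted
 * independently. Without FDB isolation, both are forced to bridge.num 0, so
 * they collapse into one shared entry whose refcount only drops to zero once
 * both bridges have deleted it.
 */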

static int dsa_port_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	/* Avoid a call to __dev_set_promiscuity() on the master, which
	 * requires rtnl_lock(), since we can't guarantee that is held here,
	 * and we can't take it either.
	 */
	if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_add(cpu_dp->master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_add(dp, addr, vid, db);
}

static int dsa_port_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_notifier_fdb_info info = {
		.dp = dp,
		.addr = addr,
		.vid = vid,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
		err = dev_uc_del(cpu_dp->master, addr);
		if (err)
			return err;
	}

	return dsa_port_host_fdb_del(dp, addr, vid, db);
}

int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info);
}

int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info info = {
		.lag = dp->lag,
		.addr = addr,
		.vid = vid,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}
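
/* Illustrative sketch (hypothetical "foo" driver, not part of this file):
 * ->port_fdb_dump() walks the hardware FDB and invokes the dsa_fdb_dump_cb_t
 * callback once per entry that belongs to the given port:
 *
 *	static int foo_port_fdb_dump(struct dsa_switch *ds, int port,
 *				     dsa_fdb_dump_cb_t *cb, void *data)
 *	{
 *		struct foo_fdb_entry ent;
 *		int err;
 *
 *		foo_for_each_fdb_entry(ds, &ent) {
 *			if (ent.port != port)
 *				continue;
 *
 *			err = cb(ent.addr, ent.vid, ent.is_static, data);
 *			if (err)
 *				return err;
 *		}
 *
 *		return 0;
 *	}
 *
 * foo_for_each_fdb_entry() and struct foo_fdb_entry are made-up names; the
 * callback signature (addr, vid, is_static, data) is the real
 * dsa_fdb_dump_cb_t.
 */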

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = {
			.type = DSA_DB_BRIDGE,
			.bridge = *dp->bridge,
		},
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

static int dsa_port_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_add(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_add(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_add(dp, mdb, db);
}

static int dsa_port_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_notifier_mdb_info info = {
		.dp = dp,
		.mdb = mdb,
		.db = db,
	};

	if (!dp->ds->fdb_isolation)
		info.db.bridge.num = 0;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_db db = {
		.type = DSA_DB_PORT,
		.dp = dp,
	};

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_db db = {
		.type = DSA_DB_BRIDGE,
		.bridge = *dp->bridge,
	};
	int err;

	err = dev_mc_del(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_host_mdb_del(dp, mdb, db);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

int dsa_port_host_vlan_add(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan,
			   struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
		.extack = extack,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_add(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}

int dsa_port_host_vlan_del(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.dp = dp,
		.vlan = vlan,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info);
	if (err && err != -EOPNOTSUPP)
		return err;

	vlan_vid_del(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);

	return err;
}

int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add(ds, dp->index, mrp);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del(ds, dp->index, mrp);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_add_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mrp_del_ring_role)
		return -EOPNOTSUPP;

	return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
}

void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate) {
		if (config->mac_capabilities)
			phylink_generic_validate(config, supported, state);
		return;
	}

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static struct phylink_pcs *
dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
				phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->phylink_mac_select_pcs)
		pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);

	return pcs;
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_port_is_user(dp))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_select_pcs = dsa_port_phylink_mac_select_pcs,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};

int dsa_port_phylink_create(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(dp->dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	/* Presence of phylink_mac_link_state or phylink_mac_an_restart is
	 * an indicator of a legacy phylink driver.
	 */
	if (ds->ops->phylink_mac_link_state ||
	    ds->ops->phylink_mac_an_restart)
		dp->pl_config.legacy_pre_march2020 = true;

	if (ds->ops->phylink_get_caps)
		ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	return 0;
}
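
/* Illustrative sketch (hypothetical "foo" driver, not part of this file): a
 * non-legacy driver advertises its MAC capabilities and supported interface
 * modes through ->phylink_get_caps(), which feeds the generic validation
 * path in dsa_port_phylink_validate() when no ->phylink_validate() is
 * provided:
 *
 *	static void foo_phylink_get_caps(struct dsa_switch *ds, int port,
 *					 struct phylink_config *config)
 *	{
 *		config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
 *					   MAC_10 | MAC_100 | MAC_1000FD;
 *
 *		__set_bit(PHY_INTERFACE_MODE_RGMII,
 *			  config->supported_interfaces);
 *		__set_bit(PHY_INTERFACE_MODE_SGMII,
 *			  config->supported_interfaces);
 *	}
 */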

static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	int err;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;

	err = dsa_port_phylink_create(dp);
	if (err)
		return err;

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}

int dsa_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_np;
	int port = dp->index;

	if (!ds->ops->adjust_link) {
		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
			of_node_put(phy_np);
			return dsa_port_phylink_register(dp);
		}
		of_node_put(phy_np);
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_port_fixed_link_register_of(dp);
	else
		return dsa_port_setup_phy_of(dp, true);
}
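
/* Illustrative device tree snippet (not from this file) for the two paths
 * handled above: a port with a PHY uses "phy-handle", while a MAC-to-MAC
 * link is described with a "fixed-link" node:
 *
 *	port@0 {
 *		reg = <0>;
 *		label = "lan1";
 *		phy-handle = <&phy0>;
 *		phy-mode = "internal";
 *	};
 *
 *	port@5 {
 *		reg = <5>;
 *		label = "cpu";
 *		ethernet = <&eth0>;
 *		phy-mode = "rgmii-id";
 *		fixed-link {
 *			speed = <1000>;
 *			full-duplex;
 *		};
 *	};
 */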

void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}

int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	if (!ds->ops->port_hsr_join)
		return -EOPNOTSUPP;

	dp->hsr_dev = hsr;

	err = ds->ops->port_hsr_join(ds, dp->index, hsr);
	if (err)
		dp->hsr_dev = NULL;

	return err;
}

void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	dp->hsr_dev = NULL;

	if (ds->ops->port_hsr_leave) {
		err = ds->ops->port_hsr_leave(ds, dp->index, hsr);
		if (err)
			dev_err(dp->ds->dev,
				"port %d failed to leave HSR %s: %pe\n",
				dp->index, hsr->name, ERR_PTR(err));
	}
}

int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.dp = dp,
		.vid = vid,
	};

	if (broadcast)
		return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);

	return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
}

void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.dp = dp,
		.vid = vid,
	};
	int err;

	if (broadcast)
		err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	else
		err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	if (err)
		dev_err(dp->ds->dev,
			"port %d failed to notify tag_8021q VLAN %d deletion: %pe\n",
			dp->index, vid, ERR_PTR(err));
}