// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"

static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;

	return ageing_time;
}

static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;

	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
		return -ERANGE;

	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
		return -ERANGE;

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}

static bool dsa_port_mtu_match(struct dsa_port *dp,
			       struct dsa_notifier_mtu_info *info)
{
	if (dp->ds->index == info->sw_index && dp->index == info->port)
		return true;

	/* Do not propagate to other switches in the tree if the notifier was
	 * targeted for a single switch.
	 */
	if (info->targeted_match)
		return false;

	if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
		return true;

	return false;
}

static int dsa_switch_mtu(struct dsa_switch *ds,
			  struct dsa_notifier_mtu_info *info)
{
	struct dsa_port *dp;
	int ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_mtu_match(dp, info)) {
			ret = ds->ops->port_change_mtu(ds, dp->index,
						       info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (dst->index == info->tree_index && ds->index == info->sw_index) {
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->port, info->br);
		if (err)
			return err;
	}

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
						     info->sw_index,
						     info->port, info->br);
		if (err)
			return err;
	}

	return dsa_tag_8021q_bridge_join(ds, info);
}

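/* Undo dsa_switch_bridge_join(): notify the targeted switch (or, cross-chip,
 * the other switches in the tree) that the port has left the bridge. If the
 * switch needs a different VLAN awareness when standalone than the bridge
 * had, the vlan_filtering setting may also have to be restored here, since
 * the bridge core does not emit a vlan_filtering event for leaving ports.
 */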
static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	bool vlan_filtering;
	struct dsa_port *dp;
	int err;

	if (dst->index == info->tree_index && ds->index == info->sw_index &&
	    ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->port, info->br);

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->tree_index,
						info->sw_index, info->port,
						info->br);

	if (ds->needs_standalone_vlan_filtering && !br_vlan_enabled(info->br)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(info->br)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing vlan_filtering setting upon slave ports leaving
	 * it. That is a good thing, because that lets us handle it and also
	 * handle the case where the switch's vlan_filtering setting is global
	 * (not per port). When that happens, the correct moment to trigger the
	 * vlan_filtering callback is only when the last port leaves the last
	 * VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(dp, ds) {
			struct net_device *bridge_dev;

			bridge_dev = dp->bridge_dev;

			if (bridge_dev && br_vlan_enabled(bridge_dev)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (change_vlan_filtering) {
		err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
					      vlan_filtering, &extack);
		if (extack._msg)
			dev_err(ds->dev, "port %d: %s\n", info->port,
				extack._msg);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return dsa_tag_8021q_bridge_leave(ds, info);
}

/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 * DSA links) that sit between the targeted port on which the notifier was
 * emitted and its dedicated CPU port.
 */
static bool dsa_port_host_address_match(struct dsa_port *dp,
					int info_sw_index, int info_port)
{
	struct dsa_port *targeted_dp, *cpu_dp;
	struct dsa_switch *targeted_ds;

	targeted_ds = dsa_switch_find(dp->ds->dst->index, info_sw_index);
	targeted_dp = dsa_to_port(targeted_ds, info_port);
	cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
						     cpu_dp->index);

	return false;
}

static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
					      const unsigned char *addr,
					      u16 vid)
{
	struct dsa_mac_addr *a;

	list_for_each_entry(a, addr_list, list)
		if (ether_addr_equal(a->addr, addr) && a->vid == vid)
			return a;

	return NULL;
}

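/* FDB/MDB entries on shared (CPU and DSA) ports can be requested by multiple
 * user ports, e.g. when several ports of the same bridge need the same host
 * address trapped to the CPU. Such entries are therefore reference-counted in
 * dp->fdbs/dp->mdbs: the hardware is programmed only on the first request and
 * deprogrammed only when the last reference is dropped. User ports call the
 * driver directly, without refcounting.
 */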
static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_mdb_add(ds, port, mdb);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_mdb_del(ds, port, mdb);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_fdb_add(ds, port, addr, vid);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_fdb_del(ds, port, addr, vid);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_fdb_add(dp, info->addr, info->vid);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_fdb_del(dp, info->addr, info->vid);
			if (err)
				break;
		}
	}

	return err;
}

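/* FDB and MDB entries that are not host addresses target a single port. If
 * that port belongs to another switch in the fabric, the entry is installed
 * on the local port that routes towards it (a DSA link), as computed by
 * dsa_towards_port().
 */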
static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_add(dp, info->addr, info->vid);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_del(dp, info->addr, info->vid);
}

static int dsa_switch_hsr_join(struct dsa_switch *ds,
			       struct dsa_notifier_hsr_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_hsr_join)
		return ds->ops->port_hsr_join(ds, info->port, info->hsr);

	return -EOPNOTSUPP;
}

static int dsa_switch_hsr_leave(struct dsa_switch *ds,
				struct dsa_notifier_hsr_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_hsr_leave)
		return ds->ops->port_hsr_leave(ds, info->port, info->hsr);

	return -EOPNOTSUPP;
}

static int dsa_switch_lag_change(struct dsa_switch *ds,
				 struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_change)
		return ds->ops->port_lag_change(ds, info->port);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
		return ds->ops->crosschip_lag_change(ds, info->sw_index,
						     info->port);

	return 0;
}

static int dsa_switch_lag_join(struct dsa_switch *ds,
			       struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_join)
		return ds->ops->port_lag_join(ds, info->port, info->lag,
					      info->info);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
		return ds->ops->crosschip_lag_join(ds, info->sw_index,
						   info->port, info->lag,
						   info->info);

	return -EOPNOTSUPP;
}

static int dsa_switch_lag_leave(struct dsa_switch *ds,
				struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_leave)
		return ds->ops->port_lag_leave(ds, info->port, info->lag);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
		return ds->ops->crosschip_lag_leave(ds, info->sw_index,
						    info->port, info->lag);

	return -EOPNOTSUPP;
}

static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_add(dp, info->mdb);
}

static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_del(dp, info->mdb);
}

static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_mdb_add(dp, info->mdb);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_mdb_del(dp, info->mdb);
			if (err)
				break;
		}
	}

	return err;
}

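/* A VLAN notified for a user port must also be programmed on the DSA links of
 * every switch in the tree, so that tagged traffic in that VLAN can transit
 * the fabric between the user port and its CPU port.
 */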
static bool dsa_port_vlan_match(struct dsa_port *dp,
				struct dsa_notifier_vlan_info *info)
{
	if (dp->ds->index == info->sw_index && dp->index == info->port)
		return true;

	if (dsa_port_is_dsa(dp))
		return true;

	return false;
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = ds->ops->port_vlan_add(ds, dp->index, info->vlan,
						     info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_vlan_del(ds, info->port, info->vlan);

	/* Do not deprogram the DSA links as they may be used as conduit
	 * for other VLAN members in the fabric.
	 */
	return 0;
}

static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
						   tag_ops->proto);
		if (err)
			return err;

		dsa_port_set_tag_protocol(cpu_dp, tag_ops);
	}

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *slave = dp->slave;

		dsa_slave_setup_tagger(slave);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_slave_change_mtu(slave, slave->mtu);
	}

	return 0;
}

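/* MRP (Media Redundancy Protocol, IEC 62439-2) offload is only programmed on
 * the switch that owns the targeted port; there is no cross-chip handling, so
 * the other switches in the tree simply return 0.
 */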
static int dsa_switch_mrp_add(struct dsa_switch *ds,
			      struct dsa_notifier_mrp_info *info)
{
	if (!ds->ops->port_mrp_add)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_add(ds, info->port, info->mrp);

	return 0;
}

static int dsa_switch_mrp_del(struct dsa_switch *ds,
			      struct dsa_notifier_mrp_info *info)
{
	if (!ds->ops->port_mrp_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_del(ds, info->port, info->mrp);

	return 0;
}

static int
dsa_switch_mrp_add_ring_role(struct dsa_switch *ds,
			     struct dsa_notifier_mrp_ring_role_info *info)
{
	if (!ds->ops->port_mrp_add_ring_role)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_add_ring_role(ds, info->port,
						       info->mrp);

	return 0;
}

static int
dsa_switch_mrp_del_ring_role(struct dsa_switch *ds,
			     struct dsa_notifier_mrp_ring_role_info *info)
{
	if (!ds->ops->port_mrp_del_ring_role)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_del_ring_role(ds, info->port,
						       info->mrp);

	return 0;
}

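/* A single notifier block per switch: dispatch tree-wide DSA events to the
 * per-event handlers above. A nonzero return value stops the notifier chain,
 * which propagates the error back to the emitter of the event.
 */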
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HSR_JOIN:
		err = dsa_switch_hsr_join(ds, info);
		break;
	case DSA_NOTIFIER_HSR_LEAVE:
		err = dsa_switch_hsr_leave(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_MRP_ADD:
		err = dsa_switch_mrp_add(ds, info);
		break;
	case DSA_NOTIFIER_MRP_DEL:
		err = dsa_switch_mrp_del(ds, info);
		break;
	case DSA_NOTIFIER_MRP_ADD_RING_ROLE:
		err = dsa_switch_mrp_add_ring_role(ds, info);
		break;
	case DSA_NOTIFIER_MRP_DEL_RING_ROLE:
		err = dsa_switch_mrp_del_ring_role(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}

int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}