// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"
#include "port.h"
#include "slave.h"
#include "switch.h"

static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;

	return ageing_time;
}

static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;

	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
		return -ERANGE;

	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
		return -ERANGE;

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}

static bool dsa_port_mtu_match(struct dsa_port *dp,
			       struct dsa_notifier_mtu_info *info)
{
	return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
}

static int dsa_switch_mtu(struct dsa_switch *ds,
			  struct dsa_notifier_mtu_info *info)
{
	struct dsa_port *dp;
	int ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_mtu_match(dp, info)) {
			ret = ds->ops->port_change_mtu(ds, dp->index,
						       info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	int err;

	if (info->dp->ds == ds) {
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->dp->index,
						info->bridge,
						&info->tx_fwd_offload,
						info->extack);
		if (err)
			return err;
	}

	if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds,
						     info->dp->ds->dst->index,
						     info->dp->ds->index,
						     info->dp->index,
						     info->bridge,
						     info->extack);
		if (err)
			return err;
	}

	return 0;
}

static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);

	if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
						info->dp->ds->index,
						info->dp->index,
						info->bridge);

	return 0;
}
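/* Host address (FDB/MDB) entries on shared (CPU and DSA) ports are reference
 * counted below, since the same {address, VID, database} tuple can be
 * requested by multiple user ports and bridges. The driver is only called on
 * the 0 -> 1 and 1 -> 0 refcount transitions; user ports bypass the
 * refcounting entirely.
 */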
/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 * DSA links) that sit between the targeted port on which the notifier was
 * emitted and its dedicated CPU port.
 */
static bool dsa_port_host_address_match(struct dsa_port *dp,
					const struct dsa_port *targeted_dp)
{
	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
						     cpu_dp->index);

	return false;
}

static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
					      const unsigned char *addr, u16 vid,
					      struct dsa_db db)
{
	struct dsa_mac_addr *a;

	list_for_each_entry(a, addr_list, list)
		if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
		    dsa_db_equal(&a->db, &db))
			return a;

	return NULL;
}

static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_mdb_add(ds, port, mdb, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_mdb_del(ds, port, mdb, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
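/* Drop one reference to the FDB entry on a shared port, and only remove it
 * from the hardware when the last user is gone.
 */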
static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &lag->fdbs);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}

static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}

static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
				err = dsa_switch_do_lag_fdb_add(ds, dp->lag,
								info->addr,
								info->vid,
								info->db);
			} else {
				err = dsa_port_do_fdb_add(dp, info->addr,
							  info->vid, info->db);
			}
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
				err = dsa_switch_do_lag_fdb_del(ds, dp->lag,
								info->addr,
								info->vid,
								info->db);
			} else {
				err = dsa_port_do_fdb_del(dp, info->addr,
							  info->vid, info->db);
			}
			if (err)
				break;
		}
	}

	return err;
}
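/* Plain FDB entries are installed on a single port: either the targeted port
 * itself, or, on other switches in the fabric, the local DSA link that points
 * towards it, as resolved by dsa_towards_port().
 */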
static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
}

static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_add)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_add(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}

static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_del)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_del(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}

static int dsa_switch_lag_change(struct dsa_switch *ds,
				 struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_change)
		return ds->ops->port_lag_change(ds, info->dp->index);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
		return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
						     info->dp->index);

	return 0;
}

static int dsa_switch_lag_join(struct dsa_switch *ds,
			       struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_join)
		return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
					      info->info, info->extack);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
		return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
						   info->dp->index, info->lag,
						   info->info, info->extack);

	return -EOPNOTSUPP;
}

static int dsa_switch_lag_leave(struct dsa_switch *ds,
				struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_leave)
		return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
		return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
						    info->dp->index, info->lag);

	return -EOPNOTSUPP;
}

static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_add(dp, info->mdb, info->db);
}

static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_del(dp, info->mdb, info->db);
}
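/* Host MDB entries are installed towards the host, on the ports selected by
 * dsa_port_host_address_match(): the CPU port of the targeted switch and the
 * upstream-facing ports leading to it.
 */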
static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
			if (err)
				break;
		}
	}

	return err;
}

/* Port VLANs match on the targeted port and on all DSA ports */
static bool dsa_port_vlan_match(struct dsa_port *dp,
				struct dsa_notifier_vlan_info *info)
{
	return dsa_port_is_dsa(dp) || dp == info->dp;
}

/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
 * (upstream and downstream) of that switch and its upstream switches.
 */
static bool dsa_port_host_vlan_match(struct dsa_port *dp,
				     const struct dsa_port *targeted_dp)
{
	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
		return dsa_port_is_dsa(dp) || dp == cpu_dp;

	return false;
}

static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_vlan *v;

	list_for_each_entry(v, vlan_list, list)
		if (v->vid == vlan->vid)
			return v;

	return NULL;
}

static int dsa_port_do_vlan_add(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports. */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_add(ds, port, vlan, extack);

	/* No need to propagate on shared ports the existing VLANs that were
	 * re-notified after just the flags have changed. This would cause a
	 * refcount bump which we need to avoid, since it unbalances the
	 * additions with the deletions.
	 */
	if (vlan->changed)
		return 0;

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (v) {
		refcount_inc(&v->refcount);
		goto out;
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
	if (err) {
		kfree(v);
		goto out;
	}

	v->vid = vlan->vid;
	refcount_set(&v->refcount, 1);
	list_add_tail(&v->list, &dp->vlans);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
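/* Drop one reference to the VLAN on a shared port, and only remove it from
 * the hardware when no user port or bridge needs it anymore.
 */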
static int dsa_port_do_vlan_del(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_del(ds, port, vlan);

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (!v) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&v->refcount))
		goto out;

	err = ds->ops->port_vlan_del(ds, port, vlan);
	if (err) {
		refcount_set(&v->refcount, 1);
		goto out;
	}

	list_del(&v->list);
	kfree(v);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info->dp)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info->dp)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}
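/* Switch all CPU ports of this switch over to a new tagging protocol, then
 * re-apply the tagger-dependent pieces of user port configuration, such as
 * the MTU, which must account for the new tagging overhead.
 */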
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	if (err)
		return err;

	dsa_switch_for_each_cpu_port(cpu_dp, ds)
		dsa_port_set_tag_protocol(cpu_dp, tag_ops);

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *slave = dp->slave;

		dsa_slave_setup_tagger(slave);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_slave_change_mtu(slave, slave->mtu);
	}

	return 0;
}

/* We use the same cross-chip notifiers to inform both the tagger side, as well
 * as the switch side, of connection and disconnection events.
 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
 * switch side doesn't support connecting to this tagger, and therefore, the
 * fact that we don't disconnect the tagger side doesn't constitute a memory
 * leak: the tagger will still operate with persistent per-switch memory, just
 * with the switch side unconnected to it. What does constitute a hard error is
 * when the switch side supports connecting but fails.
 */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
			     struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int err;

	/* Notify the new tagger about the connection to this switch */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (!ds->ops->connect_tag_protocol)
		return -EOPNOTSUPP;

	/* Notify the switch about the connection to the new tagger */
	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
	if (err) {
		/* Revert the new tagger's connection to this tree */
		if (tag_ops->disconnect)
			tag_ops->disconnect(ds);
		return err;
	}

	return 0;
}

static int
dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
				struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;

	/* Notify the tagger about the disconnection from this switch */
	if (tag_ops->disconnect && ds->tagger_data)
		tag_ops->disconnect(ds);

	/* No need to notify the switch, since it shouldn't have any
	 * resources to tear down
	 */
	return 0;
}

static int
dsa_switch_master_state_change(struct dsa_switch *ds,
			       struct dsa_notifier_master_state_info *info)
{
	if (!ds->ops->master_state_change)
		return 0;

	ds->ops->master_state_change(ds, info->master, info->operational);

	return 0;
}
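/* Dispatch a cross-chip notifier event to its handler. A non-zero handler
 * error is translated by notifier_from_errno() and stops the call chain for
 * the remaining switches in the tree.
 */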
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_ADD:
		err = dsa_switch_lag_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_DEL:
		err = dsa_switch_lag_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_ADD:
		err = dsa_switch_host_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_DEL:
		err = dsa_switch_host_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
		err = dsa_switch_connect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
		err = dsa_switch_disconnect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MASTER_STATE_CHANGE:
		err = dsa_switch_master_state_change(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}

/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The other alternative of traversing the tree is only
 * through its ports list, which does not uniquely list the switches.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}

/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 *
 * WARNING: this function is not reliable during probe time, because probing
 * between trees is asynchronous and not all DSA trees might have probed.
 */
int dsa_broadcast(unsigned long e, void *v)
{
	struct dsa_switch_tree *dst;
	int err = 0;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		err = dsa_tree_notify(dst, e, v);
		if (err)
			break;
	}

	return err;
}
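/* Subscribe this switch to its tree's notifier chain, so that
 * dsa_switch_event() gets to see the events emitted through dsa_tree_notify()
 * and dsa_broadcast().
 */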
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}