// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa.h"
#include "dsa_priv.h"
#include "port.h"
#include "slave.h"
#include "switch.h"

static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;

	return ageing_time;
}

static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;

	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
		return -ERANGE;

	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
		return -ERANGE;

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}

static bool dsa_port_mtu_match(struct dsa_port *dp,
			       struct dsa_notifier_mtu_info *info)
{
	return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
}

static int dsa_switch_mtu(struct dsa_switch *ds,
			  struct dsa_notifier_mtu_info *info)
{
	struct dsa_port *dp;
	int ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_mtu_match(dp, info)) {
			ret = ds->ops->port_change_mtu(ds, dp->index,
						       info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	int err;

	if (info->dp->ds == ds) {
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->dp->index,
						info->bridge,
						&info->tx_fwd_offload,
						info->extack);
		if (err)
			return err;
	}

	if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds,
						     info->dp->ds->dst->index,
						     info->dp->ds->index,
						     info->dp->index,
						     info->bridge,
						     info->extack);
		if (err)
			return err;
	}

	return 0;
}

static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);

	if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
						info->dp->ds->index,
						info->dp->index,
						info->bridge);

	return 0;
}
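/* Illustration: a minimal sketch of how a hypothetical driver could back the
 * ->set_ageing_time() hook consulted by dsa_switch_ageing_time() above. The
 * foo_* names, the FOO_ATU_AGE_REG register and its 15-second granularity
 * are invented for the example:
 *
 *	static int foo_set_ageing_time(struct dsa_switch *ds,
 *				       unsigned int msecs)
 *	{
 *		struct foo_priv *priv = ds->priv;
 *
 *		// The hypothetical hardware counts age in 15-second units;
 *		// round up so entries never expire sooner than requested.
 *		u32 val = DIV_ROUND_UP(msecs, 15000);
 *
 *		return foo_write(priv, FOO_ATU_AGE_REG, val);
 *	}
 *
 * A driver offering this hook would also set ds->ageing_time_min and
 * ds->ageing_time_max so that the range check above can reject values the
 * hardware cannot express.
 */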
/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 * DSA links) that sit between the targeted port on which the notifier was
 * emitted and its dedicated CPU port.
 */
static bool dsa_port_host_address_match(struct dsa_port *dp,
					const struct dsa_port *targeted_dp)
{
	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
						     cpu_dp->index);

	return false;
}

static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
					      const unsigned char *addr, u16 vid,
					      struct dsa_db db)
{
	struct dsa_mac_addr *a;

	list_for_each_entry(a, addr_list, list)
		if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
		    dsa_db_equal(&a->db, &db))
			return a;

	return NULL;
}

static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_mdb_add(ds, port, mdb, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_mdb_del(ds, port, mdb, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
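/* Illustration: the refcounting above exists because shared (CPU and DSA)
 * ports may be asked to install the same database entry on behalf of several
 * user ports. A hypothetical sequence on one CPU port:
 *
 *	dsa_port_do_mdb_add(cpu_dp, mdb, db);	// hardware programmed, refcount 1
 *	dsa_port_do_mdb_add(cpu_dp, mdb, db);	// refcount 2, no hardware access
 *	dsa_port_do_mdb_del(cpu_dp, mdb, db);	// refcount 1, no hardware access
 *	dsa_port_do_mdb_del(cpu_dp, mdb, db);	// hardware entry deleted
 *
 * Only the first add and the last del reach ds->ops, keeping additions and
 * deletions balanced from the driver's perspective.
 */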
static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &lag->fdbs);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}

static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
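/* Illustration: the LAG helpers above mirror the per-port refcounting, but
 * anchor the address list on the struct dsa_lag (lag->fdbs, guarded by
 * lag->fdb_lock) rather than on a port, since the entry logically belongs to
 * the bond as a whole. dsa_switch_host_fdb_add() below relies on this when
 * the CPU port is a LAG member; host addresses installed on behalf of
 * several user ports then collapse onto a single refcounted hardware entry:
 *
 *	dsa_switch_do_lag_fdb_add(ds, lag, addr, vid, db);	// programs hardware
 *	dsa_switch_do_lag_fdb_add(ds, lag, addr, vid, db);	// refcount only
 */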
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
				err = dsa_switch_do_lag_fdb_add(ds, dp->lag,
								info->addr,
								info->vid,
								info->db);
			} else {
				err = dsa_port_do_fdb_add(dp, info->addr,
							  info->vid, info->db);
			}
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
				err = dsa_switch_do_lag_fdb_del(ds, dp->lag,
								info->addr,
								info->vid,
								info->db);
			} else {
				err = dsa_port_do_fdb_del(dp, info->addr,
							  info->vid, info->db);
			}
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
}

static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_add)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_add(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}

static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_del)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_del(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}

static int dsa_switch_lag_change(struct dsa_switch *ds,
				 struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_change)
		return ds->ops->port_lag_change(ds, info->dp->index);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
		return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
						     info->dp->index);

	return 0;
}

static int dsa_switch_lag_join(struct dsa_switch *ds,
			       struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_join)
		return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
					      info->info, info->extack);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
		return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
						   info->dp->index, info->lag,
						   info->info, info->extack);

	return -EOPNOTSUPP;
}

static int dsa_switch_lag_leave(struct dsa_switch *ds,
				struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_leave)
		return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
		return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
						    info->dp->index, info->lag);

	return -EOPNOTSUPP;
}
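/* Illustration: dsa_switch_fdb_add() above runs on every switch in the tree,
 * and dsa_towards_port() resolves to the local port that points towards the
 * targeted one. In a hypothetical two-chip daisy chain where the targeted
 * user port 2 lives on switch 1:
 *
 *	// on switch 1 itself, the notifier resolves to the targeted port
 *	dsa_towards_port(sw1, 1, 2);	// == 2
 *	// on switch 0, it resolves to the DSA link leading to switch 1
 *	dsa_towards_port(sw0, 1, 2);	// == sw0's cascade port index
 *
 * The FDB entry is therefore installed along the whole path, keeping the
 * address reachable from every chip.
 */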
static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_add(dp, info->mdb, info->db);
}

static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_del(dp, info->mdb, info->db);
}

static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
			if (err)
				break;
		}
	}

	return err;
}

/* Port VLANs match on the targeted port and on all DSA ports */
static bool dsa_port_vlan_match(struct dsa_port *dp,
				struct dsa_notifier_vlan_info *info)
{
	return dsa_port_is_dsa(dp) || dp == info->dp;
}

/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
 * (upstream and downstream) of that switch and its upstream switches.
 */
static bool dsa_port_host_vlan_match(struct dsa_port *dp,
				     const struct dsa_port *targeted_dp)
{
	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
		return dsa_port_is_dsa(dp) || dp == cpu_dp;

	return false;
}

static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_vlan *v;

	list_for_each_entry(v, vlan_list, list)
		if (v->vid == vlan->vid)
			return v;

	return NULL;
}
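/* Illustration: for a hypothetical switch with user ports 0-3, CPU port 4
 * and DSA cascade port 5, a bridge VLAN targeted at user port 1 matches
 * ports {1, 5} through dsa_port_vlan_match() (the targeted port plus all DSA
 * links), while the corresponding host VLAN matches ports {4, 5} through
 * dsa_port_host_vlan_match() (the CPU port plus the DSA links towards it),
 * which is what lets host-terminated traffic flow in that VLAN.
 */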
static int dsa_port_do_vlan_add(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports. */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_add(ds, port, vlan, extack);

	/* No need to propagate on shared ports the existing VLANs that were
	 * re-notified after just the flags have changed. This would cause a
	 * refcount bump which we need to avoid, since it unbalances the
	 * additions with the deletions.
	 */
	if (vlan->changed)
		return 0;

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (v) {
		refcount_inc(&v->refcount);
		goto out;
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
	if (err) {
		kfree(v);
		goto out;
	}

	v->vid = vlan->vid;
	refcount_set(&v->refcount, 1);
	list_add_tail(&v->list, &dp->vlans);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}

static int dsa_port_do_vlan_del(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_del(ds, port, vlan);

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (!v) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&v->refcount))
		goto out;

	err = ds->ops->port_vlan_del(ds, port, vlan);
	if (err) {
		refcount_set(&v->refcount, 1);
		goto out;
	}

	list_del(&v->list);
	kfree(v);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info->dp)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info->dp)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}
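/* Illustration: a minimal sketch of a driver-side ->port_vlan_add() consumed
 * by dsa_switch_vlan_add() above; foo_vlan_write() and the untagged
 * restriction are invented for the example:
 *
 *	static int foo_port_vlan_add(struct dsa_switch *ds, int port,
 *				     const struct switchdev_obj_port_vlan *vlan,
 *				     struct netlink_ext_ack *extack)
 *	{
 *		bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
 *		bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
 *
 *		if (untagged && !pvid) {
 *			NL_SET_ERR_MSG_MOD(extack,
 *					   "Only the PVID can egress untagged");
 *			return -EINVAL;
 *		}
 *
 *		return foo_vlan_write(ds->priv, port, vlan->vid, untagged, pvid);
 *	}
 */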
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	if (err)
		return err;

	dsa_switch_for_each_cpu_port(cpu_dp, ds)
		dsa_port_set_tag_protocol(cpu_dp, tag_ops);

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *slave = dp->slave;

		dsa_slave_setup_tagger(slave);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_slave_change_mtu(slave, slave->mtu);
	}

	return 0;
}

/* We use the same cross-chip notifiers to inform both the tagger side, as well
 * as the switch side, of connection and disconnection events.
 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
 * switch side doesn't support connecting to this tagger, and therefore, the
 * fact that we don't disconnect the tagger side doesn't constitute a memory
 * leak: the tagger will still operate with persistent per-switch memory, just
 * with the switch side unconnected to it. What does constitute a hard error is
 * when the switch side supports connecting but fails.
 */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
			     struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int err;

	/* Notify the new tagger about the connection to this switch */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (!ds->ops->connect_tag_protocol)
		return -EOPNOTSUPP;

	/* Notify the switch about the connection to the new tagger */
	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
	if (err) {
		/* Revert the new tagger's connection to this tree */
		if (tag_ops->disconnect)
			tag_ops->disconnect(ds);
		return err;
	}

	return 0;
}

static int
dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
				struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;

	/* Notify the tagger about the disconnection from this switch */
	if (tag_ops->disconnect && ds->tagger_data)
		tag_ops->disconnect(ds);

	/* No need to notify the switch, since it shouldn't have any
	 * resources to tear down
	 */
	return 0;
}

static int
dsa_switch_master_state_change(struct dsa_switch *ds,
			       struct dsa_notifier_master_state_info *info)
{
	if (!ds->ops->master_state_change)
		return 0;

	ds->ops->master_state_change(ds, info->master, info->operational);

	return 0;
}
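/* Illustration: a sketch of the tagger-side ->connect()/->disconnect() pair
 * negotiated by dsa_switch_connect_tag_proto() above. The foo_tagger_data
 * structure is invented for the example; a real tagger keeps whatever state
 * it needs to exchange with the switch side in ds->tagger_data:
 *
 *	static int foo_tagger_connect(struct dsa_switch *ds)
 *	{
 *		ds->tagger_data = kzalloc(sizeof(struct foo_tagger_data),
 *					  GFP_KERNEL);
 *		return ds->tagger_data ? 0 : -ENOMEM;
 *	}
 *
 *	static void foo_tagger_disconnect(struct dsa_switch *ds)
 *	{
 *		kfree(ds->tagger_data);
 *		ds->tagger_data = NULL;
 *	}
 */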
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_ADD:
		err = dsa_switch_lag_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_DEL:
		err = dsa_switch_lag_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_ADD:
		err = dsa_switch_host_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_DEL:
		err = dsa_switch_host_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
		err = dsa_switch_connect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
		err = dsa_switch_disconnect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MASTER_STATE_CHANGE:
		err = dsa_switch_master_state_change(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}

/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The other alternative of traversing the tree is only
 * through its ports list, which does not uniquely list the switches.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}
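/* Illustration: a sketch of how core code emits one of the events handled by
 * dsa_switch_event() above, mirroring the pattern of the real callers; the
 * example_ name is a placeholder:
 *
 *	static int example_port_mtu_change(struct dsa_port *dp, int new_mtu)
 *	{
 *		struct dsa_notifier_mtu_info info = {
 *			.dp = dp,
 *			.mtu = new_mtu,
 *		};
 *
 *		return dsa_tree_notify(dp->ds->dst, DSA_NOTIFIER_MTU, &info);
 *	}
 *
 * Every switch in the tree then runs dsa_switch_mtu(), and the first error
 * breaks the notifier chain.
 */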
/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 *
 * WARNING: this function is not reliable during probe time, because probing
 * between trees is asynchronous and not all DSA trees might have probed.
 */
int dsa_broadcast(unsigned long e, void *v)
{
	struct dsa_switch_tree *dst;
	int err = 0;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		err = dsa_tree_notify(dst, e, v);
		if (err)
			break;
	}

	return err;
}

int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}