// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"

static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;

	return ageing_time;
}

static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;

	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
		return -ERANGE;

	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
		return -ERANGE;

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}

static bool dsa_port_mtu_match(struct dsa_port *dp,
			       struct dsa_notifier_mtu_info *info)
{
	return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
}

static int dsa_switch_mtu(struct dsa_switch *ds,
			  struct dsa_notifier_mtu_info *info)
{
	struct dsa_port *dp;
	int ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_mtu_match(dp, info)) {
			ret = ds->ops->port_change_mtu(ds, dp->index,
						       info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	int err;

	if (info->dp->ds == ds) {
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->dp->index,
						info->bridge,
						&info->tx_fwd_offload,
						info->extack);
		if (err)
			return err;
	}

	if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds,
						     info->dp->ds->dst->index,
						     info->dp->ds->index,
						     info->dp->index,
						     info->bridge,
						     info->extack);
		if (err)
			return err;
	}

	return 0;
}

static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);

	if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
						info->dp->ds->index,
						info->dp->index,
						info->bridge);

	return 0;
}

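/* Illustration (hypothetical two-switch tree, for orientation only):
 *
 *	[master] -- sw0 { port 0: CPU, port 1: DSA link towards sw1 }
 *	            sw1 { port 0: DSA link towards sw0, ports 1-4: user }
 *
 * For a host address emitted on sw1 port 2, the match below selects
 * sw1 port 0 (the upstream-facing DSA link) and sw0 port 0 (the CPU
 * port itself), so the address is trapped towards the host from
 * anywhere in the fabric.
 */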
/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 * DSA links) that sit between the targeted port on which the notifier was
 * emitted and its dedicated CPU port.
 */
static bool dsa_port_host_address_match(struct dsa_port *dp,
					const struct dsa_port *targeted_dp)
{
	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
						     cpu_dp->index);

	return false;
}

static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
					      const unsigned char *addr, u16 vid,
					      struct dsa_db db)
{
	struct dsa_mac_addr *a;

	list_for_each_entry(a, addr_list, list)
		if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
		    dsa_db_equal(&a->db, &db))
			return a;

	return NULL;
}

static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_mdb_add(ds, port, mdb, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_mdb_del(ds, port, mdb, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

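/* The FDB helpers below follow the same scheme as the MDB ones above:
 * on shared (CPU and DSA) ports, the hardware entry is installed once
 * and reference-counted, since multiple user ports may request the same
 * { address, VID, database } tuple. Note the error unwinding: a failed
 * hardware deletion resets the refcount to 1 so the software entry is
 * kept and the deletion can be retried later.
 */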
static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &lag->fdbs);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}

static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}

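/* When the targeted port's CPU port is a member of a LAG, host FDB
 * entries cannot be tied to one individual CPU port (packets may
 * ingress through any LAG member), so they are programmed against the
 * LAG and refcounted in lag->fdbs rather than in dp->fdbs.
 */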
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
				err = dsa_switch_do_lag_fdb_add(ds, dp->lag,
								info->addr,
								info->vid,
								info->db);
			} else {
				err = dsa_port_do_fdb_add(dp, info->addr,
							  info->vid, info->db);
			}
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
				err = dsa_switch_do_lag_fdb_del(ds, dp->lag,
								info->addr,
								info->vid,
								info->db);
			} else {
				err = dsa_port_do_fdb_del(dp, info->addr,
							  info->vid, info->db);
			}
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
}

static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_add)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_add(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}

static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_del)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_del(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}

static int dsa_switch_lag_change(struct dsa_switch *ds,
				 struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_change)
		return ds->ops->port_lag_change(ds, info->dp->index);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
		return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
						     info->dp->index);

	return 0;
}

static int dsa_switch_lag_join(struct dsa_switch *ds,
			       struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_join)
		return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
					      info->info, info->extack);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
		return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
						   info->dp->index, info->lag,
						   info->info, info->extack);

	return -EOPNOTSUPP;
}

static int dsa_switch_lag_leave(struct dsa_switch *ds,
				struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_leave)
		return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
		return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
						    info->dp->index, info->lag);

	return -EOPNOTSUPP;
}

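/* For a targeted MDB (or FDB) operation, a switch that does not own the
 * targeted port still participates: dsa_towards_port() resolves to the
 * local port leading towards the target (a DSA link on other switches,
 * the targeted port itself on its own switch), so matching packets are
 * steered through the fabric towards the target.
 */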
static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_add(dp, info->mdb, info->db);
}

static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_del(dp, info->mdb, info->db);
}

static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
			if (err)
				break;
		}
	}

	return err;
}

/* Port VLANs match on the targeted port and on all DSA ports */
static bool dsa_port_vlan_match(struct dsa_port *dp,
				struct dsa_notifier_vlan_info *info)
{
	return dsa_port_is_dsa(dp) || dp == info->dp;
}

/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
 * (upstream and downstream) of that switch and its upstream switches.
 */
static bool dsa_port_host_vlan_match(struct dsa_port *dp,
				     const struct dsa_port *targeted_dp)
{
	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
		return dsa_port_is_dsa(dp) || dp == cpu_dp;

	return false;
}

static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_vlan *v;

	list_for_each_entry(v, vlan_list, list)
		if (v->vid == vlan->vid)
			return v;

	return NULL;
}

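/* Unlike FDB/MDB entries, VLANs on shared ports are keyed by VID alone:
 * dsa_vlan_find() above does not compare the VLAN flags, so the flags
 * are not part of the refcounted state.
 */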
static int dsa_port_do_vlan_add(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports. */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_add(ds, port, vlan, extack);

	/* No need to propagate on shared ports the existing VLANs that were
	 * re-notified after just the flags have changed. This would cause a
	 * refcount bump which we need to avoid, since it unbalances the
	 * additions with the deletions.
	 */
	if (vlan->changed)
		return 0;

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (v) {
		refcount_inc(&v->refcount);
		goto out;
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
	if (err) {
		kfree(v);
		goto out;
	}

	v->vid = vlan->vid;
	refcount_set(&v->refcount, 1);
	list_add_tail(&v->list, &dp->vlans);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}

static int dsa_port_do_vlan_del(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_del(ds, port, vlan);

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (!v) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&v->refcount))
		goto out;

	err = ds->ops->port_vlan_del(ds, port, vlan);
	if (err) {
		refcount_set(&v->refcount, 1);
		goto out;
	}

	list_del(&v->list);
	kfree(v);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info->dp)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info->dp)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}

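/* Changing the tagging protocol is a tree-wide operation: each switch
 * is notified so that all CPU ports end up speaking the same protocol
 * towards their masters. The hardware change is attempted first, so a
 * failure leaves the tree in its previous, consistent state.
 */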
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	if (err)
		return err;

	dsa_switch_for_each_cpu_port(cpu_dp, ds)
		dsa_port_set_tag_protocol(cpu_dp, tag_ops);

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *slave = dp->slave;

		dsa_slave_setup_tagger(slave);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_slave_change_mtu(slave, slave->mtu);
	}

	return 0;
}

/* We use the same cross-chip notifiers to inform both the tagger side, as well
 * as the switch side, of connection and disconnection events.
 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
 * switch side doesn't support connecting to this tagger, and therefore, the
 * fact that we don't disconnect the tagger side doesn't constitute a memory
 * leak: the tagger will still operate with persistent per-switch memory, just
 * with the switch side unconnected to it. What does constitute a hard error is
 * when the switch side supports connecting but fails.
 */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
			     struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int err;

	/* Notify the new tagger about the connection to this switch */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (!ds->ops->connect_tag_protocol)
		return -EOPNOTSUPP;

	/* Notify the switch about the connection to the new tagger */
	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
	if (err) {
		/* Revert the new tagger's connection to this tree */
		if (tag_ops->disconnect)
			tag_ops->disconnect(ds);
		return err;
	}

	return 0;
}

static int
dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
				struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;

	/* Notify the tagger about the disconnection from this switch */
	if (tag_ops->disconnect && ds->tagger_data)
		tag_ops->disconnect(ds);

	/* No need to notify the switch, since it shouldn't have any
	 * resources to tear down
	 */
	return 0;
}

static int
dsa_switch_master_state_change(struct dsa_switch *ds,
			       struct dsa_notifier_master_state_info *info)
{
	if (!ds->ops->master_state_change)
		return 0;

	ds->ops->master_state_change(ds, info->master, info->operational);

	return 0;
}

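/* Entry point for all cross-chip notifiers. Every switch in the tree is
 * registered on the tree's raw notifier chain, so each event is seen by
 * every switch; a non-zero error is converted via notifier_from_errno(),
 * which stops the chain and propagates the error back to the emitter.
 */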
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_ADD:
		err = dsa_switch_lag_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_DEL:
		err = dsa_switch_lag_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_ADD:
		err = dsa_switch_host_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_DEL:
		err = dsa_switch_host_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
		err = dsa_switch_connect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
		err = dsa_switch_disconnect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MASTER_STATE_CHANGE:
		err = dsa_switch_master_state_change(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}

int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}