// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 * switchdev_deferred_process - Process ops in deferred queue
 *
 * Called to flush the ops currently queued in deferred ops queue.
 * rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		dev_put(dfitem->dev);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

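/* Illustrative sketch (not part of the original file): queued items are
 * normally flushed from deferred_process_work above, but a caller that
 * already holds rtnl_lock can also flush the deferred queue directly:
 *
 *	rtnl_lock();
 *	switchdev_deferred_process();	// runs every queued dfitem->func()
 *	rtnl_unlock();
 */
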
static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	dev_hold(dev);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 * switchdev_port_attr_set - Set port attribute
 *
 * @dev: port device
 * @attr: attribute to set
 * @extack: netlink extended ack, for error message propagation
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);

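/* Illustrative sketch (not part of the original file): setting a port
 * attribute with SWITCHDEV_F_DEFER, so the blocking notifier call above is
 * made later from the deferred queue rather than in the caller's context.
 * The attribute id/value shown are just one plausible combination, and
 * brport_dev/netdev are placeholder variables.
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = brport_dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = switchdev_port_attr_set(netdev, &attr, NULL);
 */
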
static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 * switchdev_port_obj_add - Add port object
 *
 * @dev: port device
 * @obj: object to add
 * @extack: netlink extended ack
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);

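/* Illustrative sketch (not part of the original file): adding a port VLAN
 * object.  Assumes the single-VID layout of struct switchdev_obj_port_vlan;
 * brport_dev/netdev/extack are placeholders.
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = brport_dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid = 100,
 *	};
 *
 *	err = switchdev_port_obj_add(netdev, &vlan.obj, extack);
 */
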
static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 * switchdev_port_obj_del - Delete port object
 *
 * @dev: port device
 * @obj: object to delete
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 * register_switchdev_notifier - Register notifier
 * @nb: notifier_block
 *
 * Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 * unregister_switchdev_notifier - Unregister notifier
 * @nb: notifier_block
 *
 * Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 * call_switchdev_notifiers - Call notifiers
 * @val: value passed unmodified to notifier function
 * @dev: port device
 * @info: notifier information data
 * @extack: netlink extended ack
 * Call all network notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

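/* Illustrative sketch (not part of the original file): a switchdev driver
 * typically registers one atomic and one blocking notifier at probe time to
 * receive the events emitted on the two chains above.  The foo_* handler
 * names are hypothetical.
 *
 *	static struct notifier_block foo_switchdev_nb = {
 *		.notifier_call = foo_switchdev_event,
 *	};
 *
 *	static struct notifier_block foo_switchdev_blocking_nb = {
 *		.notifier_call = foo_switchdev_blocking_event,
 *	};
 *
 *	err = register_switchdev_notifier(&foo_switchdev_nb);
 *	if (err)
 *		return err;
 *
 *	err = register_switchdev_blocking_notifier(&foo_switchdev_blocking_nb);
 *	if (err) {
 *		unregister_switchdev_notifier(&foo_switchdev_nb);
 *		return err;
 *	}
 */
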
struct switchdev_nested_priv {
	bool (*check_cb)(const struct net_device *dev);
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	const struct net_device *dev;
	struct net_device *lower_dev;
};

static int switchdev_lower_dev_walk(struct net_device *lower_dev,
				    struct netdev_nested_priv *priv)
{
	struct switchdev_nested_priv *switchdev_priv = priv->data;
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	bool (*check_cb)(const struct net_device *dev);
	const struct net_device *dev;

	check_cb = switchdev_priv->check_cb;
	foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
	dev = switchdev_priv->dev;

	if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
		switchdev_priv->lower_dev = lower_dev;
		return 1;
	}

	return 0;
}

static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
			 bool (*check_cb)(const struct net_device *dev),
			 bool (*foreign_dev_check_cb)(const struct net_device *dev,
						      const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
		struct net_device *orig_dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev,
				  unsigned long event, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);

	if (netif_is_lag_master(dev)) {
		if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
			goto maybe_bridged_with_us;

		/* This is a LAG interface that we offload */
		if (!lag_mod_cb)
			return -EOPNOTSUPP;

		return lag_mod_cb(dev, orig_dev, event, info->ctx, fdb_info);
	}

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge device.
	 */
	if (netif_is_bridge_master(dev)) {
		if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
			return 0;

		/* This is a bridge interface that we offload */
		netdev_for_each_lower_dev(dev, lower_dev, iter) {
			/* Do not propagate FDB entries across bridges */
			if (netif_is_bridge_master(lower_dev))
				continue;

			/* Bridge ports might be either us, or LAG interfaces
			 * that we offload.
			 */
			if (!check_cb(lower_dev) &&
			    !switchdev_lower_dev_find(lower_dev, check_cb,
						      foreign_dev_check_cb))
				continue;

			err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
								     event, fdb_info, check_cb,
								     foreign_dev_check_cb,
								     mod_cb, lag_mod_cb);
			if (err && err != -EOPNOTSUPP)
				return err;
		}

		return 0;
	}

maybe_bridged_with_us:
	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb))
		return 0;

	return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
						      check_cb, foreign_dev_check_cb,
						      mod_cb, lag_mod_cb);
}

int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev,
				  unsigned long event, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
						     check_cb, foreign_dev_check_cb,
						     mod_cb, lag_mod_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);

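/* Illustrative sketch (not part of the original file): dispatching FDB
 * add/del notifications from a driver's atomic notifier through the helper
 * above, which walks bridge/LAG uppers and lowers to find the driver's own
 * ports.  All foo_* callbacks are hypothetical driver functions.
 *
 *	static int foo_switchdev_event(struct notifier_block *nb,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *		int err;
 *
 *		switch (event) {
 *		case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *		case SWITCHDEV_FDB_DEL_TO_DEVICE:
 *			err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
 *								   foo_dev_check,
 *								   foo_foreign_dev_check,
 *								   foo_fdb_event,
 *								   NULL);
 *			return notifier_from_errno(err);
 *		}
 *
 *		return NOTIFY_DONE;
 *	}
 */
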
static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);

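/* Illustrative sketch (not part of the original file): a driver's blocking
 * notifier can dispatch SWITCHDEV_PORT_OBJ_ADD through the helper above so
 * that objects notified on e.g. a LAG upper are propagated down to the
 * driver's own ports.  The foo_* names are hypothetical.
 *
 *	case SWITCHDEV_PORT_OBJ_ADD:
 *		err = switchdev_handle_port_obj_add(dev, ptr,
 *						    foo_dev_check,
 *						    foo_port_obj_add);
 *		return notifier_from_errno(err);
 */
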
static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);

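/* Illustrative sketch (not part of the original file): SWITCHDEV_PORT_OBJ_DEL
 * and SWITCHDEV_PORT_ATTR_SET can be dispatched from the same blocking
 * notifier, mirroring the object-add case shown earlier.  The foo_* names
 * are hypothetical.
 *
 *	case SWITCHDEV_PORT_OBJ_DEL:
 *		err = switchdev_handle_port_obj_del(dev, ptr,
 *						    foo_dev_check,
 *						    foo_port_obj_del);
 *		return notifier_from_errno(err);
 *	case SWITCHDEV_PORT_ATTR_SET:
 *		err = switchdev_handle_port_attr_set(dev, ptr,
 *						     foo_dev_check,
 *						     foo_port_attr_set);
 *		return notifier_from_errno(err);
 */
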
int switchdev_bridge_port_offload(struct net_device *brport_dev,
				  struct net_device *dev, const void *ctx,
				  struct notifier_block *atomic_nb,
				  struct notifier_block *blocking_nb,
				  bool tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
			.tx_fwd_offload = tx_fwd_offload,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);

void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
				     const void *ctx,
				     struct notifier_block *atomic_nb,
				     struct notifier_block *blocking_nb)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};

	ASSERT_RTNL();

	call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
					  brport_dev, &brport_info.info,
					  NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);
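
/* Illustrative sketch (not part of the original file): a driver announces a
 * bridge port offload when one of its ports joins a bridge, and withdraws it
 * when the port leaves, passing the notifier blocks it registered earlier.
 * The brport_dev/port/foo_* names are hypothetical.
 *
 *	err = switchdev_bridge_port_offload(brport_dev, port->netdev, port,
 *					    &foo_switchdev_nb,
 *					    &foo_switchdev_blocking_nb,
 *					    false, extack);
 *	if (err)
 *		return err;
 *
 *	...
 *
 *	switchdev_bridge_port_unoffload(brport_dev, port,
 *					&foo_switchdev_nb,
 *					&foo_switchdev_blocking_nb);
 */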