1 /* 2 * net/switchdev/switchdev.c - Switch device API 3 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us> 4 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com> 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 */ 11 12 #include <linux/kernel.h> 13 #include <linux/types.h> 14 #include <linux/init.h> 15 #include <linux/mutex.h> 16 #include <linux/notifier.h> 17 #include <linux/netdevice.h> 18 #include <linux/etherdevice.h> 19 #include <linux/if_bridge.h> 20 #include <linux/list.h> 21 #include <linux/workqueue.h> 22 #include <linux/if_vlan.h> 23 #include <linux/rtnetlink.h> 24 #include <net/switchdev.h> 25 26 /** 27 * switchdev_trans_item_enqueue - Enqueue data item to transaction queue 28 * 29 * @trans: transaction 30 * @data: pointer to data being queued 31 * @destructor: data destructor 32 * @tritem: transaction item being queued 33 * 34 * Enqeueue data item to transaction queue. tritem is typically placed in 35 * cointainter pointed at by data pointer. Destructor is called on 36 * transaction abort and after successful commit phase in case 37 * the caller did not dequeue the item before. 38 */ 39 void switchdev_trans_item_enqueue(struct switchdev_trans *trans, 40 void *data, void (*destructor)(void const *), 41 struct switchdev_trans_item *tritem) 42 { 43 tritem->data = data; 44 tritem->destructor = destructor; 45 list_add_tail(&tritem->list, &trans->item_list); 46 } 47 EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue); 48 49 static struct switchdev_trans_item * 50 __switchdev_trans_item_dequeue(struct switchdev_trans *trans) 51 { 52 struct switchdev_trans_item *tritem; 53 54 if (list_empty(&trans->item_list)) 55 return NULL; 56 tritem = list_first_entry(&trans->item_list, 57 struct switchdev_trans_item, list); 58 list_del(&tritem->list); 59 return tritem; 60 } 61 62 /** 63 * switchdev_trans_item_dequeue - Dequeue data item from transaction queue 64 * 65 * @trans: transaction 66 */ 67 void *switchdev_trans_item_dequeue(struct switchdev_trans *trans) 68 { 69 struct switchdev_trans_item *tritem; 70 71 tritem = __switchdev_trans_item_dequeue(trans); 72 BUG_ON(!tritem); 73 return tritem->data; 74 } 75 EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue); 76 77 static void switchdev_trans_init(struct switchdev_trans *trans) 78 { 79 INIT_LIST_HEAD(&trans->item_list); 80 } 81 82 static void switchdev_trans_items_destroy(struct switchdev_trans *trans) 83 { 84 struct switchdev_trans_item *tritem; 85 86 while ((tritem = __switchdev_trans_item_dequeue(trans))) 87 tritem->destructor(tritem->data); 88 } 89 90 static void switchdev_trans_items_warn_destroy(struct net_device *dev, 91 struct switchdev_trans *trans) 92 { 93 WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n", 94 dev->name); 95 switchdev_trans_items_destroy(trans); 96 } 97 98 static LIST_HEAD(deferred); 99 static DEFINE_SPINLOCK(deferred_lock); 100 101 typedef void switchdev_deferred_func_t(struct net_device *dev, 102 const void *data); 103 104 struct switchdev_deferred_item { 105 struct list_head list; 106 struct net_device *dev; 107 switchdev_deferred_func_t *func; 108 unsigned long data[0]; 109 }; 110 111 static struct switchdev_deferred_item *switchdev_deferred_dequeue(void) 112 { 113 struct switchdev_deferred_item *dfitem; 114 115 spin_lock_bh(&deferred_lock); 116 if (list_empty(&deferred)) { 117 dfitem = NULL; 118 goto unlock; 119 } 120 dfitem = list_first_entry(&deferred, 121 struct switchdev_deferred_item, list); 122 list_del(&dfitem->list); 123 unlock: 124 spin_unlock_bh(&deferred_lock); 125 return dfitem; 126 } 127 128 /** 129 * switchdev_deferred_process - Process ops in deferred queue 130 * 131 * Called to flush the ops currently queued in deferred ops queue. 132 * rtnl_lock must be held. 133 */ 134 void switchdev_deferred_process(void) 135 { 136 struct switchdev_deferred_item *dfitem; 137 138 ASSERT_RTNL(); 139 140 while ((dfitem = switchdev_deferred_dequeue())) { 141 dfitem->func(dfitem->dev, dfitem->data); 142 dev_put(dfitem->dev); 143 kfree(dfitem); 144 } 145 } 146 EXPORT_SYMBOL_GPL(switchdev_deferred_process); 147 148 static void switchdev_deferred_process_work(struct work_struct *work) 149 { 150 rtnl_lock(); 151 switchdev_deferred_process(); 152 rtnl_unlock(); 153 } 154 155 static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work); 156 157 static int switchdev_deferred_enqueue(struct net_device *dev, 158 const void *data, size_t data_len, 159 switchdev_deferred_func_t *func) 160 { 161 struct switchdev_deferred_item *dfitem; 162 163 dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC); 164 if (!dfitem) 165 return -ENOMEM; 166 dfitem->dev = dev; 167 dfitem->func = func; 168 memcpy(dfitem->data, data, data_len); 169 dev_hold(dev); 170 spin_lock_bh(&deferred_lock); 171 list_add_tail(&dfitem->list, &deferred); 172 spin_unlock_bh(&deferred_lock); 173 schedule_work(&deferred_process_work); 174 return 0; 175 } 176 177 /** 178 * switchdev_port_attr_get - Get port attribute 179 * 180 * @dev: port device 181 * @attr: attribute to get 182 */ 183 int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr) 184 { 185 const struct switchdev_ops *ops = dev->switchdev_ops; 186 struct net_device *lower_dev; 187 struct list_head *iter; 188 struct switchdev_attr first = { 189 .id = SWITCHDEV_ATTR_ID_UNDEFINED 190 }; 191 int err = -EOPNOTSUPP; 192 193 if (ops && ops->switchdev_port_attr_get) 194 return ops->switchdev_port_attr_get(dev, attr); 195 196 if (attr->flags & SWITCHDEV_F_NO_RECURSE) 197 return err; 198 199 /* Switch device port(s) may be stacked under 200 * bond/team/vlan dev, so recurse down to get attr on 201 * each port. Return -ENODATA if attr values don't 202 * compare across ports. 203 */ 204 205 netdev_for_each_lower_dev(dev, lower_dev, iter) { 206 err = switchdev_port_attr_get(lower_dev, attr); 207 if (err) 208 break; 209 if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED) 210 first = *attr; 211 else if (memcmp(&first, attr, sizeof(*attr))) 212 return -ENODATA; 213 } 214 215 return err; 216 } 217 EXPORT_SYMBOL_GPL(switchdev_port_attr_get); 218 219 static int __switchdev_port_attr_set(struct net_device *dev, 220 const struct switchdev_attr *attr, 221 struct switchdev_trans *trans) 222 { 223 const struct switchdev_ops *ops = dev->switchdev_ops; 224 struct net_device *lower_dev; 225 struct list_head *iter; 226 int err = -EOPNOTSUPP; 227 228 if (ops && ops->switchdev_port_attr_set) { 229 err = ops->switchdev_port_attr_set(dev, attr, trans); 230 goto done; 231 } 232 233 if (attr->flags & SWITCHDEV_F_NO_RECURSE) 234 goto done; 235 236 /* Switch device port(s) may be stacked under 237 * bond/team/vlan dev, so recurse down to set attr on 238 * each port. 239 */ 240 241 netdev_for_each_lower_dev(dev, lower_dev, iter) { 242 err = __switchdev_port_attr_set(lower_dev, attr, trans); 243 if (err) 244 break; 245 } 246 247 done: 248 if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP) 249 err = 0; 250 251 return err; 252 } 253 254 static int switchdev_port_attr_set_now(struct net_device *dev, 255 const struct switchdev_attr *attr) 256 { 257 struct switchdev_trans trans; 258 int err; 259 260 switchdev_trans_init(&trans); 261 262 /* Phase I: prepare for attr set. Driver/device should fail 263 * here if there are going to be issues in the commit phase, 264 * such as lack of resources or support. The driver/device 265 * should reserve resources needed for the commit phase here, 266 * but should not commit the attr. 267 */ 268 269 trans.ph_prepare = true; 270 err = __switchdev_port_attr_set(dev, attr, &trans); 271 if (err) { 272 /* Prepare phase failed: abort the transaction. Any 273 * resources reserved in the prepare phase are 274 * released. 275 */ 276 277 if (err != -EOPNOTSUPP) 278 switchdev_trans_items_destroy(&trans); 279 280 return err; 281 } 282 283 /* Phase II: commit attr set. This cannot fail as a fault 284 * of driver/device. If it does, it's a bug in the driver/device 285 * because the driver said everythings was OK in phase I. 286 */ 287 288 trans.ph_prepare = false; 289 err = __switchdev_port_attr_set(dev, attr, &trans); 290 WARN(err, "%s: Commit of attribute (id=%d) failed.\n", 291 dev->name, attr->id); 292 switchdev_trans_items_warn_destroy(dev, &trans); 293 294 return err; 295 } 296 297 static void switchdev_port_attr_set_deferred(struct net_device *dev, 298 const void *data) 299 { 300 const struct switchdev_attr *attr = data; 301 int err; 302 303 err = switchdev_port_attr_set_now(dev, attr); 304 if (err && err != -EOPNOTSUPP) 305 netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n", 306 err, attr->id); 307 if (attr->complete) 308 attr->complete(dev, err, attr->complete_priv); 309 } 310 311 static int switchdev_port_attr_set_defer(struct net_device *dev, 312 const struct switchdev_attr *attr) 313 { 314 return switchdev_deferred_enqueue(dev, attr, sizeof(*attr), 315 switchdev_port_attr_set_deferred); 316 } 317 318 /** 319 * switchdev_port_attr_set - Set port attribute 320 * 321 * @dev: port device 322 * @attr: attribute to set 323 * 324 * Use a 2-phase prepare-commit transaction model to ensure 325 * system is not left in a partially updated state due to 326 * failure from driver/device. 327 * 328 * rtnl_lock must be held and must not be in atomic section, 329 * in case SWITCHDEV_F_DEFER flag is not set. 330 */ 331 int switchdev_port_attr_set(struct net_device *dev, 332 const struct switchdev_attr *attr) 333 { 334 if (attr->flags & SWITCHDEV_F_DEFER) 335 return switchdev_port_attr_set_defer(dev, attr); 336 ASSERT_RTNL(); 337 return switchdev_port_attr_set_now(dev, attr); 338 } 339 EXPORT_SYMBOL_GPL(switchdev_port_attr_set); 340 341 static size_t switchdev_obj_size(const struct switchdev_obj *obj) 342 { 343 switch (obj->id) { 344 case SWITCHDEV_OBJ_ID_PORT_VLAN: 345 return sizeof(struct switchdev_obj_port_vlan); 346 case SWITCHDEV_OBJ_ID_PORT_MDB: 347 return sizeof(struct switchdev_obj_port_mdb); 348 case SWITCHDEV_OBJ_ID_HOST_MDB: 349 return sizeof(struct switchdev_obj_port_mdb); 350 default: 351 BUG(); 352 } 353 return 0; 354 } 355 356 static int switchdev_port_obj_notify(enum switchdev_notifier_type nt, 357 struct net_device *dev, 358 const struct switchdev_obj *obj, 359 struct switchdev_trans *trans, 360 struct netlink_ext_ack *extack) 361 { 362 int rc; 363 int err; 364 365 struct switchdev_notifier_port_obj_info obj_info = { 366 .obj = obj, 367 .trans = trans, 368 .handled = false, 369 }; 370 371 rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack); 372 err = notifier_to_errno(rc); 373 if (err) { 374 WARN_ON(!obj_info.handled); 375 return err; 376 } 377 if (!obj_info.handled) 378 return -EOPNOTSUPP; 379 return 0; 380 } 381 382 static int switchdev_port_obj_add_now(struct net_device *dev, 383 const struct switchdev_obj *obj, 384 struct netlink_ext_ack *extack) 385 { 386 struct switchdev_trans trans; 387 int err; 388 389 ASSERT_RTNL(); 390 391 switchdev_trans_init(&trans); 392 393 /* Phase I: prepare for obj add. Driver/device should fail 394 * here if there are going to be issues in the commit phase, 395 * such as lack of resources or support. The driver/device 396 * should reserve resources needed for the commit phase here, 397 * but should not commit the obj. 398 */ 399 400 trans.ph_prepare = true; 401 err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD, 402 dev, obj, &trans, extack); 403 if (err) { 404 /* Prepare phase failed: abort the transaction. Any 405 * resources reserved in the prepare phase are 406 * released. 407 */ 408 409 if (err != -EOPNOTSUPP) 410 switchdev_trans_items_destroy(&trans); 411 412 return err; 413 } 414 415 /* Phase II: commit obj add. This cannot fail as a fault 416 * of driver/device. If it does, it's a bug in the driver/device 417 * because the driver said everythings was OK in phase I. 418 */ 419 420 trans.ph_prepare = false; 421 err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD, 422 dev, obj, &trans, extack); 423 WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id); 424 switchdev_trans_items_warn_destroy(dev, &trans); 425 426 return err; 427 } 428 429 static void switchdev_port_obj_add_deferred(struct net_device *dev, 430 const void *data) 431 { 432 const struct switchdev_obj *obj = data; 433 int err; 434 435 err = switchdev_port_obj_add_now(dev, obj, NULL); 436 if (err && err != -EOPNOTSUPP) 437 netdev_err(dev, "failed (err=%d) to add object (id=%d)\n", 438 err, obj->id); 439 if (obj->complete) 440 obj->complete(dev, err, obj->complete_priv); 441 } 442 443 static int switchdev_port_obj_add_defer(struct net_device *dev, 444 const struct switchdev_obj *obj) 445 { 446 return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj), 447 switchdev_port_obj_add_deferred); 448 } 449 450 /** 451 * switchdev_port_obj_add - Add port object 452 * 453 * @dev: port device 454 * @id: object ID 455 * @obj: object to add 456 * 457 * Use a 2-phase prepare-commit transaction model to ensure 458 * system is not left in a partially updated state due to 459 * failure from driver/device. 460 * 461 * rtnl_lock must be held and must not be in atomic section, 462 * in case SWITCHDEV_F_DEFER flag is not set. 463 */ 464 int switchdev_port_obj_add(struct net_device *dev, 465 const struct switchdev_obj *obj, 466 struct netlink_ext_ack *extack) 467 { 468 if (obj->flags & SWITCHDEV_F_DEFER) 469 return switchdev_port_obj_add_defer(dev, obj); 470 ASSERT_RTNL(); 471 return switchdev_port_obj_add_now(dev, obj, extack); 472 } 473 EXPORT_SYMBOL_GPL(switchdev_port_obj_add); 474 475 static int switchdev_port_obj_del_now(struct net_device *dev, 476 const struct switchdev_obj *obj) 477 { 478 return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL, 479 dev, obj, NULL, NULL); 480 } 481 482 static void switchdev_port_obj_del_deferred(struct net_device *dev, 483 const void *data) 484 { 485 const struct switchdev_obj *obj = data; 486 int err; 487 488 err = switchdev_port_obj_del_now(dev, obj); 489 if (err && err != -EOPNOTSUPP) 490 netdev_err(dev, "failed (err=%d) to del object (id=%d)\n", 491 err, obj->id); 492 if (obj->complete) 493 obj->complete(dev, err, obj->complete_priv); 494 } 495 496 static int switchdev_port_obj_del_defer(struct net_device *dev, 497 const struct switchdev_obj *obj) 498 { 499 return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj), 500 switchdev_port_obj_del_deferred); 501 } 502 503 /** 504 * switchdev_port_obj_del - Delete port object 505 * 506 * @dev: port device 507 * @id: object ID 508 * @obj: object to delete 509 * 510 * rtnl_lock must be held and must not be in atomic section, 511 * in case SWITCHDEV_F_DEFER flag is not set. 512 */ 513 int switchdev_port_obj_del(struct net_device *dev, 514 const struct switchdev_obj *obj) 515 { 516 if (obj->flags & SWITCHDEV_F_DEFER) 517 return switchdev_port_obj_del_defer(dev, obj); 518 ASSERT_RTNL(); 519 return switchdev_port_obj_del_now(dev, obj); 520 } 521 EXPORT_SYMBOL_GPL(switchdev_port_obj_del); 522 523 static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain); 524 static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain); 525 526 /** 527 * register_switchdev_notifier - Register notifier 528 * @nb: notifier_block 529 * 530 * Register switch device notifier. 531 */ 532 int register_switchdev_notifier(struct notifier_block *nb) 533 { 534 return atomic_notifier_chain_register(&switchdev_notif_chain, nb); 535 } 536 EXPORT_SYMBOL_GPL(register_switchdev_notifier); 537 538 /** 539 * unregister_switchdev_notifier - Unregister notifier 540 * @nb: notifier_block 541 * 542 * Unregister switch device notifier. 543 */ 544 int unregister_switchdev_notifier(struct notifier_block *nb) 545 { 546 return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb); 547 } 548 EXPORT_SYMBOL_GPL(unregister_switchdev_notifier); 549 550 /** 551 * call_switchdev_notifiers - Call notifiers 552 * @val: value passed unmodified to notifier function 553 * @dev: port device 554 * @info: notifier information data 555 * 556 * Call all network notifier blocks. 557 */ 558 int call_switchdev_notifiers(unsigned long val, struct net_device *dev, 559 struct switchdev_notifier_info *info) 560 { 561 info->dev = dev; 562 info->extack = NULL; 563 return atomic_notifier_call_chain(&switchdev_notif_chain, val, info); 564 } 565 EXPORT_SYMBOL_GPL(call_switchdev_notifiers); 566 567 int register_switchdev_blocking_notifier(struct notifier_block *nb) 568 { 569 struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain; 570 571 return blocking_notifier_chain_register(chain, nb); 572 } 573 EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier); 574 575 int unregister_switchdev_blocking_notifier(struct notifier_block *nb) 576 { 577 struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain; 578 579 return blocking_notifier_chain_unregister(chain, nb); 580 } 581 EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier); 582 583 int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev, 584 struct switchdev_notifier_info *info, 585 struct netlink_ext_ack *extack) 586 { 587 info->dev = dev; 588 info->extack = extack; 589 return blocking_notifier_call_chain(&switchdev_blocking_notif_chain, 590 val, info); 591 } 592 EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers); 593 594 bool switchdev_port_same_parent_id(struct net_device *a, 595 struct net_device *b) 596 { 597 struct switchdev_attr a_attr = { 598 .orig_dev = a, 599 .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID, 600 }; 601 struct switchdev_attr b_attr = { 602 .orig_dev = b, 603 .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID, 604 }; 605 606 if (switchdev_port_attr_get(a, &a_attr) || 607 switchdev_port_attr_get(b, &b_attr)) 608 return false; 609 610 return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid); 611 } 612 EXPORT_SYMBOL_GPL(switchdev_port_same_parent_id); 613 614 static int __switchdev_handle_port_obj_add(struct net_device *dev, 615 struct switchdev_notifier_port_obj_info *port_obj_info, 616 bool (*check_cb)(const struct net_device *dev), 617 int (*add_cb)(struct net_device *dev, 618 const struct switchdev_obj *obj, 619 struct switchdev_trans *trans, 620 struct netlink_ext_ack *extack)) 621 { 622 struct netlink_ext_ack *extack; 623 struct net_device *lower_dev; 624 struct list_head *iter; 625 int err = -EOPNOTSUPP; 626 627 extack = switchdev_notifier_info_to_extack(&port_obj_info->info); 628 629 if (check_cb(dev)) { 630 /* This flag is only checked if the return value is success. */ 631 port_obj_info->handled = true; 632 return add_cb(dev, port_obj_info->obj, port_obj_info->trans, 633 extack); 634 } 635 636 /* Switch ports might be stacked under e.g. a LAG. Ignore the 637 * unsupported devices, another driver might be able to handle them. But 638 * propagate to the callers any hard errors. 639 * 640 * If the driver does its own bookkeeping of stacked ports, it's not 641 * necessary to go through this helper. 642 */ 643 netdev_for_each_lower_dev(dev, lower_dev, iter) { 644 err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info, 645 check_cb, add_cb); 646 if (err && err != -EOPNOTSUPP) 647 return err; 648 } 649 650 return err; 651 } 652 653 int switchdev_handle_port_obj_add(struct net_device *dev, 654 struct switchdev_notifier_port_obj_info *port_obj_info, 655 bool (*check_cb)(const struct net_device *dev), 656 int (*add_cb)(struct net_device *dev, 657 const struct switchdev_obj *obj, 658 struct switchdev_trans *trans, 659 struct netlink_ext_ack *extack)) 660 { 661 int err; 662 663 err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb, 664 add_cb); 665 if (err == -EOPNOTSUPP) 666 err = 0; 667 return err; 668 } 669 EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add); 670 671 static int __switchdev_handle_port_obj_del(struct net_device *dev, 672 struct switchdev_notifier_port_obj_info *port_obj_info, 673 bool (*check_cb)(const struct net_device *dev), 674 int (*del_cb)(struct net_device *dev, 675 const struct switchdev_obj *obj)) 676 { 677 struct net_device *lower_dev; 678 struct list_head *iter; 679 int err = -EOPNOTSUPP; 680 681 if (check_cb(dev)) { 682 /* This flag is only checked if the return value is success. */ 683 port_obj_info->handled = true; 684 return del_cb(dev, port_obj_info->obj); 685 } 686 687 /* Switch ports might be stacked under e.g. a LAG. Ignore the 688 * unsupported devices, another driver might be able to handle them. But 689 * propagate to the callers any hard errors. 690 * 691 * If the driver does its own bookkeeping of stacked ports, it's not 692 * necessary to go through this helper. 693 */ 694 netdev_for_each_lower_dev(dev, lower_dev, iter) { 695 err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info, 696 check_cb, del_cb); 697 if (err && err != -EOPNOTSUPP) 698 return err; 699 } 700 701 return err; 702 } 703 704 int switchdev_handle_port_obj_del(struct net_device *dev, 705 struct switchdev_notifier_port_obj_info *port_obj_info, 706 bool (*check_cb)(const struct net_device *dev), 707 int (*del_cb)(struct net_device *dev, 708 const struct switchdev_obj *obj)) 709 { 710 int err; 711 712 err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb, 713 del_cb); 714 if (err == -EOPNOTSUPP) 715 err = 0; 716 return err; 717 } 718 EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del); 719