1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/kernel.h> 3 #include <linux/list.h> 4 #include <linux/netdevice.h> 5 #include <linux/rtnetlink.h> 6 #include <linux/skbuff.h> 7 #include <net/ip.h> 8 #include <net/switchdev.h> 9 10 #include "br_private.h" 11 12 static struct static_key_false br_switchdev_tx_fwd_offload; 13 14 static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p, 15 const struct sk_buff *skb) 16 { 17 if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload)) 18 return false; 19 20 return (p->flags & BR_TX_FWD_OFFLOAD) && 21 (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom); 22 } 23 24 bool br_switchdev_frame_uses_tx_fwd_offload(struct sk_buff *skb) 25 { 26 if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload)) 27 return false; 28 29 return BR_INPUT_SKB_CB(skb)->tx_fwd_offload; 30 } 31 32 void br_switchdev_frame_set_offload_fwd_mark(struct sk_buff *skb) 33 { 34 skb->offload_fwd_mark = br_switchdev_frame_uses_tx_fwd_offload(skb); 35 } 36 37 /* Mark the frame for TX forwarding offload if this egress port supports it */ 38 void nbp_switchdev_frame_mark_tx_fwd_offload(const struct net_bridge_port *p, 39 struct sk_buff *skb) 40 { 41 if (nbp_switchdev_can_offload_tx_fwd(p, skb)) 42 BR_INPUT_SKB_CB(skb)->tx_fwd_offload = true; 43 } 44 45 /* Lazily adds the hwdom of the egress bridge port to the bit mask of hwdoms 46 * that the skb has been already forwarded to, to avoid further cloning to 47 * other ports in the same hwdom by making nbp_switchdev_allowed_egress() 48 * return false. 49 */ 50 void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(const struct net_bridge_port *p, 51 struct sk_buff *skb) 52 { 53 if (nbp_switchdev_can_offload_tx_fwd(p, skb)) 54 set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms); 55 } 56 57 void nbp_switchdev_frame_mark(const struct net_bridge_port *p, 58 struct sk_buff *skb) 59 { 60 if (p->hwdom) 61 BR_INPUT_SKB_CB(skb)->src_hwdom = p->hwdom; 62 } 63 64 bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p, 65 const struct sk_buff *skb) 66 { 67 struct br_input_skb_cb *cb = BR_INPUT_SKB_CB(skb); 68 69 return !test_bit(p->hwdom, &cb->fwd_hwdoms) && 70 (!skb->offload_fwd_mark || cb->src_hwdom != p->hwdom); 71 } 72 73 /* Flags that can be offloaded to hardware */ 74 #define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | \ 75 BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_PORT_LOCKED | \ 76 BR_HAIRPIN_MODE | BR_ISOLATED | BR_MULTICAST_TO_UNICAST) 77 78 int br_switchdev_set_port_flag(struct net_bridge_port *p, 79 unsigned long flags, 80 unsigned long mask, 81 struct netlink_ext_ack *extack) 82 { 83 struct switchdev_attr attr = { 84 .orig_dev = p->dev, 85 }; 86 struct switchdev_notifier_port_attr_info info = { 87 .attr = &attr, 88 }; 89 int err; 90 91 mask &= BR_PORT_FLAGS_HW_OFFLOAD; 92 if (!mask) 93 return 0; 94 95 attr.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS; 96 attr.u.brport_flags.val = flags; 97 attr.u.brport_flags.mask = mask; 98 99 /* We run from atomic context here */ 100 err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev, 101 &info.info, extack); 102 err = notifier_to_errno(err); 103 if (err == -EOPNOTSUPP) 104 return 0; 105 106 if (err) { 107 if (extack && !extack->_msg) 108 NL_SET_ERR_MSG_MOD(extack, 109 "bridge flag offload is not supported"); 110 return -EOPNOTSUPP; 111 } 112 113 attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS; 114 attr.flags = SWITCHDEV_F_DEFER; 115 116 err = switchdev_port_attr_set(p->dev, &attr, extack); 117 if (err) { 118 if (extack && !extack->_msg) 119 NL_SET_ERR_MSG_MOD(extack, 120 "error setting offload flag on port"); 121 return err; 122 } 123 124 return 0; 125 } 126 127 static void br_switchdev_fdb_populate(struct net_bridge *br, 128 struct switchdev_notifier_fdb_info *item, 129 const struct net_bridge_fdb_entry *fdb, 130 const void *ctx) 131 { 132 const struct net_bridge_port *p = READ_ONCE(fdb->dst); 133 134 item->addr = fdb->key.addr.addr; 135 item->vid = fdb->key.vlan_id; 136 item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); 137 item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags); 138 item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags); 139 item->info.dev = (!p || item->is_local) ? br->dev : p->dev; 140 item->info.ctx = ctx; 141 } 142 143 void 144 br_switchdev_fdb_notify(struct net_bridge *br, 145 const struct net_bridge_fdb_entry *fdb, int type) 146 { 147 struct switchdev_notifier_fdb_info item; 148 149 br_switchdev_fdb_populate(br, &item, fdb, NULL); 150 151 switch (type) { 152 case RTM_DELNEIGH: 153 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE, 154 item.info.dev, &item.info, NULL); 155 break; 156 case RTM_NEWNEIGH: 157 call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE, 158 item.info.dev, &item.info, NULL); 159 break; 160 } 161 } 162 163 int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags, 164 bool changed, struct netlink_ext_ack *extack) 165 { 166 struct switchdev_obj_port_vlan v = { 167 .obj.orig_dev = dev, 168 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, 169 .flags = flags, 170 .vid = vid, 171 .changed = changed, 172 }; 173 174 return switchdev_port_obj_add(dev, &v.obj, extack); 175 } 176 177 int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid) 178 { 179 struct switchdev_obj_port_vlan v = { 180 .obj.orig_dev = dev, 181 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, 182 .vid = vid, 183 }; 184 185 return switchdev_port_obj_del(dev, &v.obj); 186 } 187 188 static int nbp_switchdev_hwdom_set(struct net_bridge_port *joining) 189 { 190 struct net_bridge *br = joining->br; 191 struct net_bridge_port *p; 192 int hwdom; 193 194 /* joining is yet to be added to the port list. */ 195 list_for_each_entry(p, &br->port_list, list) { 196 if (netdev_phys_item_id_same(&joining->ppid, &p->ppid)) { 197 joining->hwdom = p->hwdom; 198 return 0; 199 } 200 } 201 202 hwdom = find_next_zero_bit(&br->busy_hwdoms, BR_HWDOM_MAX, 1); 203 if (hwdom >= BR_HWDOM_MAX) 204 return -EBUSY; 205 206 set_bit(hwdom, &br->busy_hwdoms); 207 joining->hwdom = hwdom; 208 return 0; 209 } 210 211 static void nbp_switchdev_hwdom_put(struct net_bridge_port *leaving) 212 { 213 struct net_bridge *br = leaving->br; 214 struct net_bridge_port *p; 215 216 /* leaving is no longer in the port list. */ 217 list_for_each_entry(p, &br->port_list, list) { 218 if (p->hwdom == leaving->hwdom) 219 return; 220 } 221 222 clear_bit(leaving->hwdom, &br->busy_hwdoms); 223 } 224 225 static int nbp_switchdev_add(struct net_bridge_port *p, 226 struct netdev_phys_item_id ppid, 227 bool tx_fwd_offload, 228 struct netlink_ext_ack *extack) 229 { 230 int err; 231 232 if (p->offload_count) { 233 /* Prevent unsupported configurations such as a bridge port 234 * which is a bonding interface, and the member ports are from 235 * different hardware switches. 236 */ 237 if (!netdev_phys_item_id_same(&p->ppid, &ppid)) { 238 NL_SET_ERR_MSG_MOD(extack, 239 "Same bridge port cannot be offloaded by two physical switches"); 240 return -EBUSY; 241 } 242 243 /* Tolerate drivers that call switchdev_bridge_port_offload() 244 * more than once for the same bridge port, such as when the 245 * bridge port is an offloaded bonding/team interface. 246 */ 247 p->offload_count++; 248 249 return 0; 250 } 251 252 p->ppid = ppid; 253 p->offload_count = 1; 254 255 err = nbp_switchdev_hwdom_set(p); 256 if (err) 257 return err; 258 259 if (tx_fwd_offload) { 260 p->flags |= BR_TX_FWD_OFFLOAD; 261 static_branch_inc(&br_switchdev_tx_fwd_offload); 262 } 263 264 return 0; 265 } 266 267 static void nbp_switchdev_del(struct net_bridge_port *p) 268 { 269 if (WARN_ON(!p->offload_count)) 270 return; 271 272 p->offload_count--; 273 274 if (p->offload_count) 275 return; 276 277 if (p->hwdom) 278 nbp_switchdev_hwdom_put(p); 279 280 if (p->flags & BR_TX_FWD_OFFLOAD) { 281 p->flags &= ~BR_TX_FWD_OFFLOAD; 282 static_branch_dec(&br_switchdev_tx_fwd_offload); 283 } 284 } 285 286 static int 287 br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb, 288 const struct net_bridge_fdb_entry *fdb, 289 unsigned long action, const void *ctx) 290 { 291 struct switchdev_notifier_fdb_info item; 292 int err; 293 294 br_switchdev_fdb_populate(br, &item, fdb, ctx); 295 296 err = nb->notifier_call(nb, action, &item); 297 return notifier_to_errno(err); 298 } 299 300 static int 301 br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx, 302 bool adding, struct notifier_block *nb) 303 { 304 struct net_bridge_fdb_entry *fdb; 305 struct net_bridge *br; 306 unsigned long action; 307 int err = 0; 308 309 if (!nb) 310 return 0; 311 312 if (!netif_is_bridge_master(br_dev)) 313 return -EINVAL; 314 315 br = netdev_priv(br_dev); 316 317 if (adding) 318 action = SWITCHDEV_FDB_ADD_TO_DEVICE; 319 else 320 action = SWITCHDEV_FDB_DEL_TO_DEVICE; 321 322 rcu_read_lock(); 323 324 hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) { 325 err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx); 326 if (err) 327 break; 328 } 329 330 rcu_read_unlock(); 331 332 return err; 333 } 334 335 static int br_switchdev_vlan_attr_replay(struct net_device *br_dev, 336 const void *ctx, 337 struct notifier_block *nb, 338 struct netlink_ext_ack *extack) 339 { 340 struct switchdev_notifier_port_attr_info attr_info = { 341 .info = { 342 .dev = br_dev, 343 .extack = extack, 344 .ctx = ctx, 345 }, 346 }; 347 struct net_bridge *br = netdev_priv(br_dev); 348 struct net_bridge_vlan_group *vg; 349 struct switchdev_attr attr; 350 struct net_bridge_vlan *v; 351 int err; 352 353 attr_info.attr = &attr; 354 attr.orig_dev = br_dev; 355 356 vg = br_vlan_group(br); 357 if (!vg) 358 return 0; 359 360 list_for_each_entry(v, &vg->vlan_list, vlist) { 361 if (v->msti) { 362 attr.id = SWITCHDEV_ATTR_ID_VLAN_MSTI; 363 attr.u.vlan_msti.vid = v->vid; 364 attr.u.vlan_msti.msti = v->msti; 365 366 err = nb->notifier_call(nb, SWITCHDEV_PORT_ATTR_SET, 367 &attr_info); 368 err = notifier_to_errno(err); 369 if (err) 370 return err; 371 } 372 } 373 374 return 0; 375 } 376 377 static int 378 br_switchdev_vlan_replay_one(struct notifier_block *nb, 379 struct net_device *dev, 380 struct switchdev_obj_port_vlan *vlan, 381 const void *ctx, unsigned long action, 382 struct netlink_ext_ack *extack) 383 { 384 struct switchdev_notifier_port_obj_info obj_info = { 385 .info = { 386 .dev = dev, 387 .extack = extack, 388 .ctx = ctx, 389 }, 390 .obj = &vlan->obj, 391 }; 392 int err; 393 394 err = nb->notifier_call(nb, action, &obj_info); 395 return notifier_to_errno(err); 396 } 397 398 static int br_switchdev_vlan_replay_group(struct notifier_block *nb, 399 struct net_device *dev, 400 struct net_bridge_vlan_group *vg, 401 const void *ctx, unsigned long action, 402 struct netlink_ext_ack *extack) 403 { 404 struct net_bridge_vlan *v; 405 int err = 0; 406 u16 pvid; 407 408 if (!vg) 409 return 0; 410 411 pvid = br_get_pvid(vg); 412 413 list_for_each_entry(v, &vg->vlan_list, vlist) { 414 struct switchdev_obj_port_vlan vlan = { 415 .obj.orig_dev = dev, 416 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, 417 .flags = br_vlan_flags(v, pvid), 418 .vid = v->vid, 419 }; 420 421 if (!br_vlan_should_use(v)) 422 continue; 423 424 err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx, 425 action, extack); 426 if (err) 427 return err; 428 } 429 430 return 0; 431 } 432 433 static int br_switchdev_vlan_replay(struct net_device *br_dev, 434 const void *ctx, bool adding, 435 struct notifier_block *nb, 436 struct netlink_ext_ack *extack) 437 { 438 struct net_bridge *br = netdev_priv(br_dev); 439 struct net_bridge_port *p; 440 unsigned long action; 441 int err; 442 443 ASSERT_RTNL(); 444 445 if (!nb) 446 return 0; 447 448 if (!netif_is_bridge_master(br_dev)) 449 return -EINVAL; 450 451 if (adding) 452 action = SWITCHDEV_PORT_OBJ_ADD; 453 else 454 action = SWITCHDEV_PORT_OBJ_DEL; 455 456 err = br_switchdev_vlan_replay_group(nb, br_dev, br_vlan_group(br), 457 ctx, action, extack); 458 if (err) 459 return err; 460 461 list_for_each_entry(p, &br->port_list, list) { 462 struct net_device *dev = p->dev; 463 464 err = br_switchdev_vlan_replay_group(nb, dev, 465 nbp_vlan_group(p), 466 ctx, action, extack); 467 if (err) 468 return err; 469 } 470 471 if (adding) { 472 err = br_switchdev_vlan_attr_replay(br_dev, ctx, nb, extack); 473 if (err) 474 return err; 475 } 476 477 return 0; 478 } 479 480 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING 481 struct br_switchdev_mdb_complete_info { 482 struct net_bridge_port *port; 483 struct br_ip ip; 484 }; 485 486 static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv) 487 { 488 struct br_switchdev_mdb_complete_info *data = priv; 489 struct net_bridge_port_group __rcu **pp; 490 struct net_bridge_port_group *p; 491 struct net_bridge_mdb_entry *mp; 492 struct net_bridge_port *port = data->port; 493 struct net_bridge *br = port->br; 494 495 if (err) 496 goto err; 497 498 spin_lock_bh(&br->multicast_lock); 499 mp = br_mdb_ip_get(br, &data->ip); 500 if (!mp) 501 goto out; 502 for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; 503 pp = &p->next) { 504 if (p->key.port != port) 505 continue; 506 p->flags |= MDB_PG_FLAGS_OFFLOAD; 507 } 508 out: 509 spin_unlock_bh(&br->multicast_lock); 510 err: 511 kfree(priv); 512 } 513 514 static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb, 515 const struct net_bridge_mdb_entry *mp) 516 { 517 if (mp->addr.proto == htons(ETH_P_IP)) 518 ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr); 519 #if IS_ENABLED(CONFIG_IPV6) 520 else if (mp->addr.proto == htons(ETH_P_IPV6)) 521 ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr); 522 #endif 523 else 524 ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr); 525 526 mdb->vid = mp->addr.vid; 527 } 528 529 static void br_switchdev_host_mdb_one(struct net_device *dev, 530 struct net_device *lower_dev, 531 struct net_bridge_mdb_entry *mp, 532 int type) 533 { 534 struct switchdev_obj_port_mdb mdb = { 535 .obj = { 536 .id = SWITCHDEV_OBJ_ID_HOST_MDB, 537 .flags = SWITCHDEV_F_DEFER, 538 .orig_dev = dev, 539 }, 540 }; 541 542 br_switchdev_mdb_populate(&mdb, mp); 543 544 switch (type) { 545 case RTM_NEWMDB: 546 switchdev_port_obj_add(lower_dev, &mdb.obj, NULL); 547 break; 548 case RTM_DELMDB: 549 switchdev_port_obj_del(lower_dev, &mdb.obj); 550 break; 551 } 552 } 553 554 static void br_switchdev_host_mdb(struct net_device *dev, 555 struct net_bridge_mdb_entry *mp, int type) 556 { 557 struct net_device *lower_dev; 558 struct list_head *iter; 559 560 netdev_for_each_lower_dev(dev, lower_dev, iter) 561 br_switchdev_host_mdb_one(dev, lower_dev, mp, type); 562 } 563 564 static int 565 br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev, 566 const struct switchdev_obj_port_mdb *mdb, 567 unsigned long action, const void *ctx, 568 struct netlink_ext_ack *extack) 569 { 570 struct switchdev_notifier_port_obj_info obj_info = { 571 .info = { 572 .dev = dev, 573 .extack = extack, 574 .ctx = ctx, 575 }, 576 .obj = &mdb->obj, 577 }; 578 int err; 579 580 err = nb->notifier_call(nb, action, &obj_info); 581 return notifier_to_errno(err); 582 } 583 584 static int br_switchdev_mdb_queue_one(struct list_head *mdb_list, 585 enum switchdev_obj_id id, 586 const struct net_bridge_mdb_entry *mp, 587 struct net_device *orig_dev) 588 { 589 struct switchdev_obj_port_mdb *mdb; 590 591 mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC); 592 if (!mdb) 593 return -ENOMEM; 594 595 mdb->obj.id = id; 596 mdb->obj.orig_dev = orig_dev; 597 br_switchdev_mdb_populate(mdb, mp); 598 list_add_tail(&mdb->obj.list, mdb_list); 599 600 return 0; 601 } 602 603 void br_switchdev_mdb_notify(struct net_device *dev, 604 struct net_bridge_mdb_entry *mp, 605 struct net_bridge_port_group *pg, 606 int type) 607 { 608 struct br_switchdev_mdb_complete_info *complete_info; 609 struct switchdev_obj_port_mdb mdb = { 610 .obj = { 611 .id = SWITCHDEV_OBJ_ID_PORT_MDB, 612 .flags = SWITCHDEV_F_DEFER, 613 }, 614 }; 615 616 if (!pg) 617 return br_switchdev_host_mdb(dev, mp, type); 618 619 br_switchdev_mdb_populate(&mdb, mp); 620 621 mdb.obj.orig_dev = pg->key.port->dev; 622 switch (type) { 623 case RTM_NEWMDB: 624 complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC); 625 if (!complete_info) 626 break; 627 complete_info->port = pg->key.port; 628 complete_info->ip = mp->addr; 629 mdb.obj.complete_priv = complete_info; 630 mdb.obj.complete = br_switchdev_mdb_complete; 631 if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL)) 632 kfree(complete_info); 633 break; 634 case RTM_DELMDB: 635 switchdev_port_obj_del(pg->key.port->dev, &mdb.obj); 636 break; 637 } 638 } 639 #endif 640 641 static int 642 br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev, 643 const void *ctx, bool adding, struct notifier_block *nb, 644 struct netlink_ext_ack *extack) 645 { 646 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING 647 const struct net_bridge_mdb_entry *mp; 648 struct switchdev_obj *obj, *tmp; 649 struct net_bridge *br; 650 unsigned long action; 651 LIST_HEAD(mdb_list); 652 int err = 0; 653 654 ASSERT_RTNL(); 655 656 if (!nb) 657 return 0; 658 659 if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev)) 660 return -EINVAL; 661 662 br = netdev_priv(br_dev); 663 664 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) 665 return 0; 666 667 /* We cannot walk over br->mdb_list protected just by the rtnl_mutex, 668 * because the write-side protection is br->multicast_lock. But we 669 * need to emulate the [ blocking ] calling context of a regular 670 * switchdev event, so since both br->multicast_lock and RCU read side 671 * critical sections are atomic, we have no choice but to pick the RCU 672 * read side lock, queue up all our events, leave the critical section 673 * and notify switchdev from blocking context. 674 */ 675 rcu_read_lock(); 676 677 hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) { 678 struct net_bridge_port_group __rcu * const *pp; 679 const struct net_bridge_port_group *p; 680 681 if (mp->host_joined) { 682 err = br_switchdev_mdb_queue_one(&mdb_list, 683 SWITCHDEV_OBJ_ID_HOST_MDB, 684 mp, br_dev); 685 if (err) { 686 rcu_read_unlock(); 687 goto out_free_mdb; 688 } 689 } 690 691 for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL; 692 pp = &p->next) { 693 if (p->key.port->dev != dev) 694 continue; 695 696 err = br_switchdev_mdb_queue_one(&mdb_list, 697 SWITCHDEV_OBJ_ID_PORT_MDB, 698 mp, dev); 699 if (err) { 700 rcu_read_unlock(); 701 goto out_free_mdb; 702 } 703 } 704 } 705 706 rcu_read_unlock(); 707 708 if (adding) 709 action = SWITCHDEV_PORT_OBJ_ADD; 710 else 711 action = SWITCHDEV_PORT_OBJ_DEL; 712 713 list_for_each_entry(obj, &mdb_list, list) { 714 err = br_switchdev_mdb_replay_one(nb, dev, 715 SWITCHDEV_OBJ_PORT_MDB(obj), 716 action, ctx, extack); 717 if (err) 718 goto out_free_mdb; 719 } 720 721 out_free_mdb: 722 list_for_each_entry_safe(obj, tmp, &mdb_list, list) { 723 list_del(&obj->list); 724 kfree(SWITCHDEV_OBJ_PORT_MDB(obj)); 725 } 726 727 if (err) 728 return err; 729 #endif 730 731 return 0; 732 } 733 734 static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx, 735 struct notifier_block *atomic_nb, 736 struct notifier_block *blocking_nb, 737 struct netlink_ext_ack *extack) 738 { 739 struct net_device *br_dev = p->br->dev; 740 struct net_device *dev = p->dev; 741 int err; 742 743 err = br_switchdev_vlan_replay(br_dev, ctx, true, blocking_nb, extack); 744 if (err && err != -EOPNOTSUPP) 745 return err; 746 747 err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb, 748 extack); 749 if (err && err != -EOPNOTSUPP) 750 return err; 751 752 err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb); 753 if (err && err != -EOPNOTSUPP) 754 return err; 755 756 return 0; 757 } 758 759 static void nbp_switchdev_unsync_objs(struct net_bridge_port *p, 760 const void *ctx, 761 struct notifier_block *atomic_nb, 762 struct notifier_block *blocking_nb) 763 { 764 struct net_device *br_dev = p->br->dev; 765 struct net_device *dev = p->dev; 766 767 br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb); 768 769 br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL); 770 771 br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL); 772 } 773 774 /* Let the bridge know that this port is offloaded, so that it can assign a 775 * switchdev hardware domain to it. 776 */ 777 int br_switchdev_port_offload(struct net_bridge_port *p, 778 struct net_device *dev, const void *ctx, 779 struct notifier_block *atomic_nb, 780 struct notifier_block *blocking_nb, 781 bool tx_fwd_offload, 782 struct netlink_ext_ack *extack) 783 { 784 struct netdev_phys_item_id ppid; 785 int err; 786 787 err = dev_get_port_parent_id(dev, &ppid, false); 788 if (err) 789 return err; 790 791 err = nbp_switchdev_add(p, ppid, tx_fwd_offload, extack); 792 if (err) 793 return err; 794 795 err = nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack); 796 if (err) 797 goto out_switchdev_del; 798 799 return 0; 800 801 out_switchdev_del: 802 nbp_switchdev_del(p); 803 804 return err; 805 } 806 807 void br_switchdev_port_unoffload(struct net_bridge_port *p, const void *ctx, 808 struct notifier_block *atomic_nb, 809 struct notifier_block *blocking_nb) 810 { 811 nbp_switchdev_unsync_objs(p, ctx, atomic_nb, blocking_nb); 812 813 nbp_switchdev_del(p); 814 } 815