#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest, *port_nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (!p)
			continue;
		port_nest = nla_nest_start(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				br_timer_value(&p->multicast_router_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router)) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
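/* Translation helpers between the uapi br_mdb_entry carried on the
 * netlink interface and the bridge's internal flags/br_ip
 * representation.
 */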
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
}

static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	if (ip->proto == htons(ETH_P_IP))
		ip->u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip->u.ip6 = entry->addr.u.ip6;
#endif
}

static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_htable *mdb;
	struct nlattr *nest, *nest2;
	int i, err = 0;
	int idx = 0, s_idx = cb->args[1];

	if (br->multicast_disabled)
		return 0;

	mdb = rcu_dereference(br->mdb);
	if (!mdb)
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	for (i = 0; i < mdb->max; i++) {
		struct net_bridge_mdb_entry *mp;
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;
		struct net_bridge_port *port;

		hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
			if (idx < s_idx)
				goto skip;

			nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
			if (nest2 == NULL) {
				err = -EMSGSIZE;
				goto out;
			}

			for (pp = &mp->ports;
			     (p = rcu_dereference(*pp)) != NULL;
			     pp = &p->next) {
				struct nlattr *nest_ent;
				struct br_mdb_entry e;

				port = p->port;
				if (!port)
					continue;

				memset(&e, 0, sizeof(e));
				e.ifindex = port->dev->ifindex;
				e.vid = p->addr.vid;
				__mdb_entry_fill_flags(&e, p->flags);
				if (p->addr.proto == htons(ETH_P_IP))
					e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
				if (p->addr.proto == htons(ETH_P_IPV6))
					e.addr.u.ip6 = p->addr.u.ip6;
#endif
				e.addr.proto = p->addr.proto;
				nest_ent = nla_nest_start(skb,
							  MDBA_MDB_ENTRY_INFO);
				if (!nest_ent) {
					nla_nest_cancel(skb, nest2);
					err = -EMSGSIZE;
					goto out;
				}
				if (nla_put_nohdr(skb, sizeof(e), &e) ||
				    nla_put_u32(skb,
						MDBA_MDB_EATTR_TIMER,
						br_timer_value(&p->timer))) {
					nla_nest_cancel(skb, nest_ent);
					nla_nest_cancel(skb, nest2);
					err = -EMSGSIZE;
					goto out;
				}
				nla_nest_end(skb, nest_ent);
			}
			nla_nest_end(skb, nest2);
skip:
			idx++;
		}
	}

out:
	cb->args[1] = idx;
	nla_nest_end(skb, nest);
	return err;
}

static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	s_idx = cb->args[0];

	rcu_read_lock();

	/* In theory this could be wrapped to 0... */
	cb->seq = net->dev_base_seq + br_mdb_rehash_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}
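/* Fill an skb with a single-entry RTM_NEWMDB/RTM_DELMDB notification:
 * a br_port_msg header followed by the entry nested inside
 * MDBA_MDB / MDBA_MDB_ENTRY as an MDBA_MDB_ENTRY_INFO attribute.
 */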
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct br_mdb_entry *entry, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_mdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(struct br_mdb_entry));
}

struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}
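/* Push the entry down to switchdev hardware (deferred add on RTM_NEWMDB,
 * delete on RTM_DELMDB; br_mdb_complete() above marks the port group as
 * offloaded once the add completes) and notify RTNLGRP_MDB listeners.
 */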
static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
			    struct br_mdb_entry *entry, int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};
	struct net_device *port_dev;
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	port_dev = __dev_get_by_index(net, entry->ifindex);
	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = port_dev;
	if (port_dev && type == RTM_NEWMDB) {
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (complete_info) {
			complete_info->port = p;
			__mdb_entry_to_br_ip(entry, &complete_info->ip);
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			switchdev_port_obj_add(port_dev, &mdb.obj);
		}
	} else if (port_dev && type == RTM_DELMDB) {
		switchdev_port_obj_del(port_dev, &mdb.obj);
	}

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
		   struct br_ip *group, int type, u8 flags)
{
	struct br_mdb_entry entry;

	memset(&entry, 0, sizeof(entry));
	entry.ifindex = port->dev->ifindex;
	entry.addr.proto = group->proto;
	entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	entry.addr.u.ip6 = group->u.ip6;
#endif
	entry.vid = group->vid;
	__mdb_entry_fill_flags(&entry, flags);
	__br_mdb_notify(dev, port, &entry, type);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}

void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
	if (entry->ifindex == 0)
		return false;

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4))
			return false;
		if (ipv4_is_local_multicast(entry->addr.u.ip4))
			return false;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
			return false;
#endif
	} else
		return false;
	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
		return false;
	if (entry->vid >= VLAN_VID_MASK)
		return false;

	return true;
}
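/* Parse an RTM_NEWMDB/RTM_DELMDB request: the message must target a
 * bridge device and carry an MDBA_SET_ENTRY attribute holding a valid
 * br_mdb_entry.
 */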
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL,
			  NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY] ||
	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
		return -EINVAL;
	}

	*pentry = entry;
	return 0;
}

static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, unsigned char state)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_htable *mdb;
	unsigned long now = jiffies;
	int err;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp) {
		mp = br_multicast_new_group(br, port, group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, state, NULL);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);
	if (state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

	return 0;
}

static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct br_mdb_entry *entry)
{
	struct br_ip ip;
	struct net_device *dev;
	struct net_bridge_port *p;
	int ret;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	dev = __dev_get_by_index(net, entry->ifindex);
	if (!dev)
		return -ENODEV;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, &ip, entry->state);
	spin_unlock_bh(&br->multicast_lock);
	return ret;
}
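/* RTM_NEWMDB handler. When vlan filtering is enabled on the bridge and
 * no VLAN is given, the entry is replicated across every vlan configured
 * on the port; br_mdb_del() below mirrors this for deletion.
 */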
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, entry);
			if (err)
				break;
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
		}
	} else {
		err = __br_mdb_add(net, br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
	}

	return err;
}

static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &ip);
	if (!mp)
		goto unlock;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		__mdb_entry_fill_flags(entry, p->flags);
		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);
		err = 0;

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry);
			if (!err)
				__br_mdb_notify(dev, p, entry, RTM_DELMDB);
		}
	} else {
		err = __br_mdb_del(br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_DELMDB);
	}

	return err;
}

void br_mdb_init(void)
{
	rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, NULL);
	rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL);
	rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}
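/* For illustration, these handlers back the iproute2 "bridge mdb"
 * command (device and group values below are examples only):
 *
 *   bridge mdb add dev br0 port eth0 grp 239.1.1.1 permanent vid 10
 *   bridge mdb del dev br0 port eth0 grp 239.1.1.1 vid 10
 *   bridge mdb show                 (RTM_GETMDB dump)
 *
 * "permanent" maps to MDB_PERMANENT and "temp" (the default) to
 * MDB_TEMPORARY; temporary entries age out via the port group timer
 * armed in br_mdb_add_group().
 */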