// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest, *port_nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (!p)
			continue;
		port_nest = nla_nest_start(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				br_timer_value(&p->multicast_router_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router)) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
}

static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	if (ip->proto == htons(ETH_P_IP))
		ip->u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip->u.ip6 = entry->addr.u.ip6;
#endif
}
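/* Dump the MDB in the nested-attribute layout consumed by userspace
 * ("bridge mdb show"); roughly:
 *
 *	MDBA_MDB
 *	    MDBA_MDB_ENTRY		one nest per multicast group
 *		MDBA_MDB_ENTRY_INFO	one per member port, carrying a
 *					struct br_mdb_entry without an
 *					nlattr header (nla_put_nohdr())
 *		    MDBA_MDB_EATTR_TIMER  remaining membership timer
 *
 * cb->args[1] records how many entries have been emitted so far, so a
 * resumed multi-part dump knows where to pick up.
 */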
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_htable *mdb;
	struct nlattr *nest, *nest2;
	int i, err = 0;
	int idx = 0, s_idx = cb->args[1];

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	mdb = rcu_dereference(br->mdb);
	if (!mdb)
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	for (i = 0; i < mdb->max; i++) {
		struct net_bridge_mdb_entry *mp;
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;
		struct net_bridge_port *port;

		hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
			if (idx < s_idx)
				goto skip;

			nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
			if (nest2 == NULL) {
				err = -EMSGSIZE;
				goto out;
			}

			for (pp = &mp->ports;
			     (p = rcu_dereference(*pp)) != NULL;
			     pp = &p->next) {
				struct nlattr *nest_ent;
				struct br_mdb_entry e;

				port = p->port;
				if (!port)
					continue;

				memset(&e, 0, sizeof(e));
				e.ifindex = port->dev->ifindex;
				e.vid = p->addr.vid;
				__mdb_entry_fill_flags(&e, p->flags);
				if (p->addr.proto == htons(ETH_P_IP))
					e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
				if (p->addr.proto == htons(ETH_P_IPV6))
					e.addr.u.ip6 = p->addr.u.ip6;
#endif
				e.addr.proto = p->addr.proto;
				nest_ent = nla_nest_start(skb,
							  MDBA_MDB_ENTRY_INFO);
				if (!nest_ent) {
					nla_nest_cancel(skb, nest2);
					err = -EMSGSIZE;
					goto out;
				}
				if (nla_put_nohdr(skb, sizeof(e), &e) ||
				    nla_put_u32(skb,
						MDBA_MDB_EATTR_TIMER,
						br_timer_value(&p->timer))) {
					nla_nest_cancel(skb, nest_ent);
					nla_nest_cancel(skb, nest2);
					err = -EMSGSIZE;
					goto out;
				}
				nla_nest_end(skb, nest_ent);
			}
			nla_nest_end(skb, nest2);
skip:
			idx++;
		}
	}

out:
	cb->args[1] = idx;
	nla_nest_end(skb, nest);
	return err;
}

static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}

static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	/* In theory this could be wrapped to 0... */
	cb->seq = net->dev_base_seq + br_mdb_rehash_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct br_mdb_entry *entry, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_mdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(struct br_mdb_entry));
}
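/* Switchdev MDB offload is deferred (SWITCHDEV_F_DEFER), so the driver
 * completes it asynchronously. br_mdb_complete() is that completion
 * callback: it looks the group up again under multicast_lock (it may
 * have disappeared in the meantime) and marks the matching port group
 * as offloaded so the flag shows up in subsequent dumps.
 */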
struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct br_mdb_entry *entry, int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};

	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = dev;
	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_mdb_switchdev_host(struct net_device *dev,
				  struct br_mdb_entry *entry, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_mdb_switchdev_host_port(dev, lower_dev, entry, type);
}

static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
			    struct br_mdb_entry *entry, int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};
	struct net_device *port_dev;
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	port_dev = __dev_get_by_index(net, entry->ifindex);
	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = port_dev;
	if (p && port_dev && type == RTM_NEWMDB) {
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (complete_info) {
			complete_info->port = p;
			__mdb_entry_to_br_ip(entry, &complete_info->ip);
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(port_dev, &mdb.obj))
				kfree(complete_info);
		}
	} else if (p && port_dev && type == RTM_DELMDB) {
		switchdev_port_obj_del(port_dev, &mdb.obj);
	}

	if (!p)
		br_mdb_switchdev_host(dev, entry, type);

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
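/* Notify RTNLGRP_MDB listeners, and the underlying switchdev ports, of
 * a group membership change. A NULL @port means the group is joined by
 * the bridge device itself (host join), in which case the entry is
 * propagated to the lower devices via br_mdb_switchdev_host().
 */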
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
		   struct br_ip *group, int type, u8 flags)
{
	struct br_mdb_entry entry;

	memset(&entry, 0, sizeof(entry));
	if (port)
		entry.ifindex = port->dev->ifindex;
	else
		entry.ifindex = dev->ifindex;
	entry.addr.proto = group->proto;
	entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	entry.addr.u.ip6 = group->u.ip6;
#endif
	entry.vid = group->vid;
	__mdb_entry_fill_flags(&entry, flags);
	__br_mdb_notify(dev, port, &entry, type);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}

void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
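/* Sanity-check an entry supplied by userspace: it must name a port,
 * carry a multicast group address that the bridge actually snoops
 * (link-local IPv4 groups and the IPv6 all-nodes address are rejected),
 * use a supported state and carry a valid VLAN id.
 */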
static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
	if (entry->ifindex == 0)
		return false;

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4))
			return false;
		if (ipv4_is_local_multicast(entry->addr.u.ip4))
			return false;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
			return false;
#endif
	} else
		return false;
	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
		return false;
	if (entry->vid >= VLAN_VID_MASK)
		return false;

	return true;
}

static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL,
			  NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY] ||
	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
		return -EINVAL;
	}

	*pentry = entry;
	return 0;
}

static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, unsigned char state)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_htable *mdb;
	unsigned long now = jiffies;
	int err;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp) {
		mp = br_multicast_new_group(br, port, group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, state, NULL);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);
	if (state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

	return 0;
}
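/* Core of RTM_NEWMDB: resolve @entry->ifindex to a bridge port and add
 * the group under multicast_lock. RTNL is held here (doit handlers run
 * under it), which is what makes __dev_get_by_index() and
 * br_port_get_rtnl() safe without further locking.
 */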
static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct br_mdb_entry *entry)
{
	struct br_ip ip;
	struct net_device *dev;
	struct net_bridge_port *p;
	int ret;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	dev = __dev_get_by_index(net, entry->ifindex);
	if (!dev)
		return -ENODEV;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, &ip, entry->state);
	spin_unlock_bh(&br->multicast_lock);
	return ret;
}

static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, entry);
			if (err)
				break;
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
		}
	} else {
		err = __br_mdb_add(net, br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
	}

	return err;
}

static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &ip);
	if (!mp)
		goto unlock;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		__mdb_entry_fill_flags(entry, p->flags);
		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);
		err = 0;

		if (!mp->ports && !mp->host_joined &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry);
			if (!err)
				__br_mdb_notify(dev, p, entry, RTM_DELMDB);
		}
	} else {
		err = __br_mdb_del(br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_DELMDB);
	}

	return err;
}
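/* Register the MDB handlers with rtnetlink. Once registered, the table
 * is driven from userspace over an AF_BRIDGE netlink socket; with
 * iproute2 that is (a sketch, exact flags vary by iproute2 version):
 *
 *	bridge mdb add dev br0 port eth0 grp 239.1.1.1 permanent vid 10
 *	bridge mdb del dev br0 port eth0 grp 239.1.1.1 vid 10
 *	bridge mdb show
 */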
void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}