#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex))
			goto fail;
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_htable *mdb;
	struct nlattr *nest, *nest2;
	int i, err = 0;
	int idx = 0, s_idx = cb->args[1];

	if (br->multicast_disabled)
		return 0;

	mdb = rcu_dereference(br->mdb);
	if (!mdb)
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	for (i = 0; i < mdb->max; i++) {
		struct net_bridge_mdb_entry *mp;
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;
		struct net_bridge_port *port;

		hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
			if (idx < s_idx)
				goto skip;

			nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
			if (nest2 == NULL) {
				err = -EMSGSIZE;
				goto out;
			}

			for (pp = &mp->ports;
			     (p = rcu_dereference(*pp)) != NULL;
			     pp = &p->next) {
				port = p->port;
				if (port) {
					struct br_mdb_entry e;
					memset(&e, 0, sizeof(e));
					e.ifindex = port->dev->ifindex;
					e.state = p->state;
					e.vid = p->addr.vid;
					if (p->addr.proto == htons(ETH_P_IP))
						e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
					if (p->addr.proto == htons(ETH_P_IPV6))
						e.addr.u.ip6 = p->addr.u.ip6;
#endif
					e.addr.proto = p->addr.proto;
					if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) {
						nla_nest_cancel(skb, nest2);
						err = -EMSGSIZE;
						goto out;
					}
				}
			}
			nla_nest_end(skb, nest2);
		skip:
			idx++;
		}
	}

out:
	cb->args[1] = idx;
	nla_nest_end(skb, nest);
	return err;
}
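/* Dump handler for RTM_GETMDB.  cb->args[0] resumes iteration over
 * bridge devices between dump rounds, while cb->args[1] (consumed by
 * br_mdb_fill_info() above) resumes within a single bridge's hash
 * table.  cb->seq folds the device-list generation count and the MDB
 * rehash count together, so either kind of change during a multi-part
 * dump bumps the sequence number.
 */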
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	s_idx = cb->args[0];

	rcu_read_lock();

	/* In theory this could be wrapped to 0... */
	cb->seq = net->dev_base_seq + br_mdb_rehash_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct br_mdb_entry *entry, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_mdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(struct br_mdb_entry));
}

static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
			    int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
		   struct br_ip *group, int type, u8 state)
{
	struct br_mdb_entry entry;

	memset(&entry, 0, sizeof(entry));
	entry.ifindex = port->dev->ifindex;
	entry.addr.proto = group->proto;
	entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	entry.addr.u.ip6 = group->u.ip6;
#endif
	entry.state = state;
	entry.vid = group->vid;
	__br_mdb_notify(dev, &entry, type);
}
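/* Fill a notification carrying a single MDBA_ROUTER_PORT attribute;
 * used by br_rtr_notify() below when a port joins or leaves the
 * multicast router list.
 */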
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}

void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
	if (entry->ifindex == 0)
		return false;

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4))
			return false;
		if (ipv4_is_local_multicast(entry->addr.u.ip4))
			return false;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
			return false;
#endif
	} else
		return false;
	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
		return false;
	if (entry->vid >= VLAN_VID_MASK)
		return false;

	return true;
}

static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY] ||
	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
		return -EINVAL;
	}

	*pentry = entry;
	return 0;
}
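/* Must be called with br->multicast_lock held.  Creates the group if
 * it does not exist yet, refuses duplicate port entries with -EEXIST,
 * and links the new port group into the group's port list, which is
 * kept ordered by port pointer value.  MDB_TEMPORARY entries are armed
 * with the membership timer so they age out like learned entries.
 */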
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, unsigned char state)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_htable *mdb;
	unsigned long now = jiffies;
	int err;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp) {
		mp = br_multicast_new_group(br, port, group);
		err = PTR_ERR(mp);
		if (IS_ERR(mp))
			return err;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, state);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);
	if (state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

	return 0;
}

static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct br_mdb_entry *entry)
{
	struct br_ip ip;
	struct net_device *dev;
	struct net_bridge_port *p;
	int ret;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	dev = __dev_get_by_index(net, entry->ifindex);
	if (!dev)
		return -ENODEV;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	memset(&ip, 0, sizeof(ip));
	ip.vid = entry->vid;
	ip.proto = entry->addr.proto;
	if (ip.proto == htons(ETH_P_IP))
		ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip.u.ip6 = entry->addr.u.ip6;
#endif

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, &ip, entry->state);
	spin_unlock_bh(&br->multicast_lock);
	return ret;
}
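/* RTM_NEWMDB handler.  When vlan filtering is enabled and no VID is
 * given, the entry is replicated across every vlan configured on the
 * port (see the comment in the body).  From userspace this is normally
 * driven by iproute2, e.g. (illustrative command, device names are
 * placeholders):
 *
 *	bridge mdb add dev br0 port eth0 grp 239.1.1.1 permanent
 */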
483 */ 484 pdev = __dev_get_by_index(net, entry->ifindex); 485 if (!pdev) 486 return -ENODEV; 487 488 p = br_port_get_rtnl(pdev); 489 if (!p || p->br != br || p->state == BR_STATE_DISABLED) 490 return -EINVAL; 491 492 vg = nbp_vlan_group(p); 493 if (br_vlan_enabled(br) && vg && entry->vid == 0) { 494 list_for_each_entry(v, &vg->vlan_list, vlist) { 495 entry->vid = v->vid; 496 err = __br_mdb_add(net, br, entry); 497 if (err) 498 break; 499 __br_mdb_notify(dev, entry, RTM_NEWMDB); 500 } 501 } else { 502 err = __br_mdb_add(net, br, entry); 503 if (!err) 504 __br_mdb_notify(dev, entry, RTM_NEWMDB); 505 } 506 507 return err; 508 } 509 510 static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry) 511 { 512 struct net_bridge_mdb_htable *mdb; 513 struct net_bridge_mdb_entry *mp; 514 struct net_bridge_port_group *p; 515 struct net_bridge_port_group __rcu **pp; 516 struct br_ip ip; 517 int err = -EINVAL; 518 519 if (!netif_running(br->dev) || br->multicast_disabled) 520 return -EINVAL; 521 522 memset(&ip, 0, sizeof(ip)); 523 ip.vid = entry->vid; 524 ip.proto = entry->addr.proto; 525 if (ip.proto == htons(ETH_P_IP)) 526 ip.u.ip4 = entry->addr.u.ip4; 527 #if IS_ENABLED(CONFIG_IPV6) 528 else 529 ip.u.ip6 = entry->addr.u.ip6; 530 #endif 531 532 spin_lock_bh(&br->multicast_lock); 533 mdb = mlock_dereference(br->mdb, br); 534 535 mp = br_mdb_ip_get(mdb, &ip); 536 if (!mp) 537 goto unlock; 538 539 for (pp = &mp->ports; 540 (p = mlock_dereference(*pp, br)) != NULL; 541 pp = &p->next) { 542 if (!p->port || p->port->dev->ifindex != entry->ifindex) 543 continue; 544 545 if (p->port->state == BR_STATE_DISABLED) 546 goto unlock; 547 548 entry->state = p->state; 549 rcu_assign_pointer(*pp, p->next); 550 hlist_del_init(&p->mglist); 551 del_timer(&p->timer); 552 call_rcu_bh(&p->rcu, br_multicast_free_pg); 553 err = 0; 554 555 if (!mp->ports && !mp->mglist && 556 netif_running(br->dev)) 557 mod_timer(&mp->timer, jiffies); 558 break; 559 } 560 561 unlock: 562 spin_unlock_bh(&br->multicast_lock); 563 return err; 564 } 565 566 static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh) 567 { 568 struct net *net = sock_net(skb->sk); 569 struct net_bridge_vlan_group *vg; 570 struct net_device *dev, *pdev; 571 struct br_mdb_entry *entry; 572 struct net_bridge_port *p; 573 struct net_bridge_vlan *v; 574 struct net_bridge *br; 575 int err; 576 577 err = br_mdb_parse(skb, nlh, &dev, &entry); 578 if (err < 0) 579 return err; 580 581 br = netdev_priv(dev); 582 583 /* If vlan filtering is enabled and VLAN is not specified 584 * delete mdb entry on all vlans configured on the port. 
585 */ 586 pdev = __dev_get_by_index(net, entry->ifindex); 587 if (!pdev) 588 return -ENODEV; 589 590 p = br_port_get_rtnl(pdev); 591 if (!p || p->br != br || p->state == BR_STATE_DISABLED) 592 return -EINVAL; 593 594 vg = nbp_vlan_group(p); 595 if (br_vlan_enabled(br) && vg && entry->vid == 0) { 596 list_for_each_entry(v, &vg->vlan_list, vlist) { 597 entry->vid = v->vid; 598 err = __br_mdb_del(br, entry); 599 if (!err) 600 __br_mdb_notify(dev, entry, RTM_DELMDB); 601 } 602 } else { 603 err = __br_mdb_del(br, entry); 604 if (!err) 605 __br_mdb_notify(dev, entry, RTM_DELMDB); 606 } 607 608 return err; 609 } 610 611 void br_mdb_init(void) 612 { 613 rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, NULL); 614 rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL); 615 rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL); 616 } 617 618 void br_mdb_uninit(void) 619 { 620 rtnl_unregister(PF_BRIDGE, RTM_GETMDB); 621 rtnl_unregister(PF_BRIDGE, RTM_NEWMDB); 622 rtnl_unregister(PF_BRIDGE, RTM_DELMDB); 623 } 624