#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex))
			goto fail;
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_htable *mdb;
	struct nlattr *nest, *nest2;
	int i, err = 0;
	int idx = 0, s_idx = cb->args[1];

	if (br->multicast_disabled)
		return 0;

	mdb = rcu_dereference(br->mdb);
	if (!mdb)
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	for (i = 0; i < mdb->max; i++) {
		struct net_bridge_mdb_entry *mp;
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;
		struct net_bridge_port *port;

		hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
			if (idx < s_idx)
				goto skip;

			nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
			if (nest2 == NULL) {
				err = -EMSGSIZE;
				goto out;
			}

			for (pp = &mp->ports;
			     (p = rcu_dereference(*pp)) != NULL;
			     pp = &p->next) {
				port = p->port;
				if (port) {
					struct br_mdb_entry e;
					memset(&e, 0, sizeof(e));
					e.ifindex = port->dev->ifindex;
					e.state = p->state;
					if (p->addr.proto == htons(ETH_P_IP))
						e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
					if (p->addr.proto == htons(ETH_P_IPV6))
						e.addr.u.ip6 = p->addr.u.ip6;
#endif
					e.addr.proto = p->addr.proto;
					if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) {
						nla_nest_cancel(skb, nest2);
						err = -EMSGSIZE;
						goto out;
					}
				}
			}
			nla_nest_end(skb, nest2);
		skip:
			idx++;
		}
	}

out:
	cb->args[1] = idx;
	nla_nest_end(skb, nest);
	return err;
}

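/* Dump state kept in the netlink callback across the skbs of a multi-part
 * dump: cb->args[0] holds the index of the last bridge device visited and
 * cb->args[1] the index of the last mdb entry written for that bridge, so
 * br_mdb_dump() and br_mdb_fill_info() can resume where they left off.
 */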
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	s_idx = cb->args[0];

	rcu_read_lock();

	/* In theory this could be wrapped to 0... */
	cb->seq = net->dev_base_seq + br_mdb_rehash_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct br_mdb_entry *entry, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	return nlmsg_end(skb, nlh);

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_mdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(struct br_mdb_entry));
}

static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
			    int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
		   struct br_ip *group, int type)
{
	struct br_mdb_entry entry;

	memset(&entry, 0, sizeof(entry));
	entry.ifindex = port->dev->ifindex;
	entry.addr.proto = group->proto;
	entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	entry.addr.u.ip6 = group->u.ip6;
#endif
	__br_mdb_notify(dev, &entry, type);
}

static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
	if (entry->ifindex == 0)
		return false;

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4))
			return false;
		if (ipv4_is_local_multicast(entry->addr.u.ip4))
			return false;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
			return false;
#endif
	} else
		return false;
	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
		return false;

	return true;
}

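/* RTM_NEWMDB/RTM_DELMDB requests parsed below carry a struct br_port_msg
 * (naming the bridge device by ifindex) followed by a single MDBA_SET_ENTRY
 * attribute whose payload is a struct br_mdb_entry: the member port's
 * ifindex, the group address/protocol and the entry state, which
 * is_valid_mdb_entry() requires to be MDB_PERMANENT or MDB_TEMPORARY.
 */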
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY] ||
	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
		return -EINVAL;
	}

	*pentry = entry;
	return 0;
}

static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, unsigned char state)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp) {
		mp = br_multicast_new_group(br, port, group);
		err = PTR_ERR(mp);
		if (IS_ERR(mp))
			return err;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, state);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);

	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
	return 0;
}

static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct br_mdb_entry *entry)
{
	struct br_ip ip;
	struct net_device *dev;
	struct net_bridge_port *p;
	int ret;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	dev = __dev_get_by_index(net, entry->ifindex);
	if (!dev)
		return -ENODEV;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	ip.proto = entry->addr.proto;
	if (ip.proto == htons(ETH_P_IP))
		ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip.u.ip6 = entry->addr.u.ip6;
#endif

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, &ip, entry->state);
	spin_unlock_bh(&br->multicast_lock);
	return ret;
}

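/* The rtnetlink doit handlers below run with the RTNL mutex held, which is
 * what makes __dev_get_by_index() and br_port_get_rtnl() safe to call; the
 * MDB itself is then modified under br->multicast_lock with BHs disabled,
 * the same lock used by the multicast snooping code in br_multicast.c.
 */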
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	err = __br_mdb_add(net, br, entry);
	if (!err)
		__br_mdb_notify(dev, entry, RTM_NEWMDB);
	return err;
}

static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	ip.proto = entry->addr.proto;
	if (ip.proto == htons(ETH_P_IP)) {
		if (timer_pending(&br->ip4_querier.timer))
			return -EBUSY;

		ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		if (timer_pending(&br->ip6_querier.timer))
			return -EBUSY;

		ip.u.ip6 = entry->addr.u.ip6;
#endif
	}

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &ip);
	if (!mp)
		goto unlock;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);
		err = 0;

		if (!mp->ports && !mp->mglist && mp->timer_armed &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net_device *dev;
	struct br_mdb_entry *entry;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	err = __br_mdb_del(br, entry);
	if (!err)
		__br_mdb_notify(dev, entry, RTM_DELMDB);
	return err;
}

void br_mdb_init(void)
{
	rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, NULL);
	rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL);
	rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}
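
/* Illustrative note (not part of the module itself): the handlers registered
 * in br_mdb_init() are typically exercised from userspace by iproute2's
 * bridge(8) utility, roughly:
 *
 *	bridge mdb add dev br0 port eth0 grp 239.1.1.1 permanent
 *	bridge mdb show
 *
 * A program talking to the kernel directly would open a NETLINK_ROUTE socket
 * and send an RTM_NEWMDB/RTM_DELMDB request shaped as described above
 * br_mdb_parse(): a struct br_port_msg for the bridge plus one
 * MDBA_SET_ENTRY attribute carrying the struct br_mdb_entry defined in
 * the uapi header <linux/if_bridge.h>.
 */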