/*
 *	Bridge netlink control interface
 *
 *	Authors:
 *	Stephen Hemminger		<shemminger@osdl.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>

#include "br_private.h"
#include "br_private_stp.h"
#include "br_private_tunnel.h"

static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				u32 filter_mask)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int num_vlans = 0;

	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	pvid = br_get_pvid(vg);
	/* Count number of vlan infos */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		/* only a context, bridge vlan not activated */
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			if ((vid_range_end - vid_range_start) > 0)
				num_vlans += 2;
			else
				num_vlans += 1;
		}
initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		if ((vid_range_end - vid_range_start) > 0)
			num_vlans += 2;
		else
			num_vlans += 1;
	}

	return num_vlans;
}

static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				 u32 filter_mask)
{
	int num_vlans;

	if (!vg)
		return 0;

	if (filter_mask & RTEXT_FILTER_BRVLAN)
		return vg->num_vlans;

	rcu_read_lock();
	num_vlans = __get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	return num_vlans;
}

static size_t br_get_link_af_size_filtered(const struct net_device *dev,
					   u32 filter_mask)
{
	struct net_bridge_vlan_group *vg = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br;
	int num_vlan_infos;
	size_t vinfo_sz = 0;

	rcu_read_lock();
	if (br_port_exists(dev)) {
		p = br_port_get_rcu(dev);
		vg = nbp_vlan_group_rcu(p);
	} else if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	if (p && (p->flags & BR_VLAN_TUNNEL))
		vinfo_sz += br_get_vlan_tunnel_info_size(vg);

	/* Each VLAN is returned in bridge_vlan_info along with flags */
	vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));

	return vinfo_sz;
}

static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_BCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
		+ nla_total_size(1)	/* IFLA_BRPORT_NEIGH_SUPPRESS */
		+ nla_total_size(1)	/* IFLA_BRPORT_ISOLATED */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
#endif
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_GROUP_FWD_MASK */
		+ 0;
}

static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
		+ nla_total_size(4) /* IFLA_MASTER */
		+ nla_total_size(4) /* IFLA_MTU */
		+ nla_total_size(4) /* IFLA_LINK */
		+ nla_total_size(1) /* IFLA_OPERSTATE */
		+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
		+ nla_total_size(br_get_link_af_size_filtered(dev,
							      filter_mask)) /* IFLA_AF_SPEC */
		+ nla_total_size(4); /* IFLA_BRPORT_BACKUP_PORT */
}

static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
	struct net_bridge_port *backup_p;
	u64 timerval;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT,
		       !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
		       !!(p->flags & BR_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
		       !!(p->flags & BR_MCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
		       !!(p->flags & BR_BCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
		       !!(p->flags & BR_PROXYARP_WIFI)) ||
	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_root) ||
	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_bridge) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
		       p->topology_change_ack) ||
	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
							BR_VLAN_TUNNEL)) ||
	    nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
	    nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
		       !!(p->flags & BR_NEIGH_SUPPRESS)) ||
	    nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)))
		return -EMSGSIZE;

	timerval = br_timer_value(&p->message_age_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->forward_delay_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->hold_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
		       p->multicast_router))
		return -EMSGSIZE;
#endif

	/* we might be called only with br->lock */
	rcu_read_lock();
	backup_p = rcu_dereference(p->backup_port);
	if (backup_p)
		nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT,
			    backup_p->dev->ifindex);
	rcu_read_unlock();

	return 0;
}

static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
				    u16 vid_end, u16 flags)
{
	struct bridge_vlan_info vinfo;

	if ((vid_end - vid_start) > 0) {
		/* add range to skb */
		vinfo.vid = vid_start;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;

		vinfo.vid = vid_end;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	} else {
		vinfo.vid = vid_start;
		vinfo.flags = flags;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
					 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int err = 0;

	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
	 * and mark vlan info with begin and end flags
	 * if vlaninfo represents a range
	 */
	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
						       vid_range_end,
						       vid_range_flags);
			if (err)
				return err;
		}

initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		/* Call it once more to send any left over vlans */
		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
					       vid_range_end,
					       vid_range_flags);
		if (err)
			return err;
	}

	return 0;
}

static int br_fill_ifvlaninfo(struct sk_buff *skb,
			      struct net_bridge_vlan_group *vg)
{
	struct bridge_vlan_info vinfo;
	struct net_bridge_vlan *v;
	u16 pvid;

	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;

		vinfo.vid = v->vid;
		vinfo.flags = 0;
		if (v->vid == pvid)
			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev)
{
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
	struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		 event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest
			= nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);

		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	/* Check if the VID information is requested */
	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		struct net_bridge_vlan_group *vg;
		struct nlattr *af;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;
		}
		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);

		if (port && (port->flags & BR_VLAN_TUNNEL))
			err = br_fill_vlan_tunnel_info(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
		nla_nest_end(skb, af);
	}

done:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* Notify listeners of a change in bridge or port information */
void br_ifinfo_notify(int event, const struct net_bridge *br,
		      const struct net_bridge_port *port)
{
	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
	struct net_device *dev;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 port_no = 0;

	if (WARN_ON(!port && !br))
		return;

	if (port) {
		dev = port->dev;
		br = port->br;
		port_no = port->port_no;
	} else {
		dev = br->dev;
	}

	net = dev_net(dev);
	br_debug(br, "port %u(%s) event %d\n", port_no, dev->name, event);

	skb = nlmsg_new(br_nlmsg_size(dev, filter), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}

/*
 * Dump information about all ports, in response to GETLINK
 */
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
	       struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct net_bridge_port *port = br_port_get_rtnl(dev);

	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
			      filter_mask, dev);
}

static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
			int cmd, struct bridge_vlan_info *vinfo, bool *changed)
{
	bool curr_change;
	int err = 0;

	switch (cmd) {
	case RTM_SETLINK:
		if (p) {
			/* if the MASTER flag is set this will act on the global
			 * per-VLAN entry as well
			 */
			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
					   &curr_change);
		} else {
			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, vinfo->vid, vinfo->flags,
					  &curr_change);
		}
		if (curr_change)
			*changed = true;
		break;

	case RTM_DELLINK:
		if (p) {
			if (!nbp_vlan_delete(p, vinfo->vid))
				*changed = true;

			if ((vinfo->flags & BRIDGE_VLAN_INFO_MASTER) &&
			    !br_vlan_delete(p->br, vinfo->vid))
				*changed = true;
		} else if (!br_vlan_delete(br, vinfo->vid)) {
			*changed = true;
		}
		break;
	}

	return err;
}

static int br_process_vlan_info(struct net_bridge *br,
				struct net_bridge_port *p, int cmd,
				struct bridge_vlan_info *vinfo_curr,
				struct bridge_vlan_info **vinfo_last,
				bool *changed)
{
	if (!vinfo_curr->vid || vinfo_curr->vid >= VLAN_VID_MASK)
		return -EINVAL;

	if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
		/* check if we are already processing a range */
		if (*vinfo_last)
			return -EINVAL;
		*vinfo_last = vinfo_curr;
		/* don't allow range of pvids */
		if ((*vinfo_last)->flags & BRIDGE_VLAN_INFO_PVID)
			return -EINVAL;
		return 0;
	}

	if (*vinfo_last) {
		struct bridge_vlan_info tmp_vinfo;
		int v, err;

		if (!(vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END))
			return -EINVAL;

		if (vinfo_curr->vid <= (*vinfo_last)->vid)
			return -EINVAL;

		memcpy(&tmp_vinfo, *vinfo_last,
		       sizeof(struct bridge_vlan_info));
		for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
			tmp_vinfo.vid = v;
			err = br_vlan_info(br, p, cmd, &tmp_vinfo, changed);
			if (err)
				break;
		}
		*vinfo_last = NULL;

		return err;
	}

	return br_vlan_info(br, p, cmd, vinfo_curr, changed);
}

static int br_afspec(struct net_bridge *br,
		     struct net_bridge_port *p,
		     struct nlattr *af_spec,
		     int cmd, bool *changed)
{
	struct bridge_vlan_info *vinfo_curr = NULL;
	struct bridge_vlan_info *vinfo_last = NULL;
	struct nlattr *attr;
	struct vtunnel_info tinfo_last = {};
	struct vtunnel_info tinfo_curr = {};
	int err = 0, rem;

	nla_for_each_nested(attr, af_spec, rem) {
		err = 0;
		switch (nla_type(attr)) {
		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
			if (!p || !(p->flags & BR_VLAN_TUNNEL))
				return -EINVAL;
			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
			if (err)
				return err;
			err = br_process_vlan_tunnel_info(br, p, cmd,
							  &tinfo_curr,
							  &tinfo_last,
							  changed);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_VLAN_INFO:
			if (nla_len(attr) != sizeof(struct bridge_vlan_info))
				return -EINVAL;
			vinfo_curr = nla_data(attr);
			err = br_process_vlan_info(br, p, cmd, vinfo_curr,
						   &vinfo_last, changed);
			if (err)
				return err;
			break;
		}
	}

	return err;
}

static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]	= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]	= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE] = { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_VLAN_TUNNEL] = { .type = NLA_U8 },
	[IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
	[IFLA_BRPORT_ISOLATED]	= { .type = NLA_U8 },
	[IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 },
};

/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
	if (state > BR_STATE_BLOCKING)
		return -EINVAL;

	/* if kernel STP is running, don't allow changes */
	if (p->br->stp_enabled == BR_KERNEL_STP)
		return -EBUSY;

	/* if device is not up, change is not allowed
	 * if link is not present, only allowable state is disabled
	 */
	if (!netif_running(p->dev) ||
	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
		return -ENETDOWN;

	br_set_state(p, state);
	br_port_state_selection(p->br);
	return 0;
}

/* Set/clear port flags based on attribute */
static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
			    int attrtype, unsigned long mask)
{
	unsigned long flags;
	int err;

	if (!tb[attrtype])
		return 0;

	if (nla_get_u8(tb[attrtype]))
		flags = p->flags | mask;
	else
		flags = p->flags & ~mask;

	err = br_switchdev_set_port_flag(p, flags, mask);
	if (err)
		return err;

	p->flags = flags;
	return 0;
}

/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
{
	unsigned long old_flags = p->flags;
	bool br_vlan_tunnel_old = false;
	int err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
	if (err)
		return err;

	br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
	err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
	if (err)
		return err;

	if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
		nbp_vlan_tunnel_info_flush(p);

	if (tb[IFLA_BRPORT_COST]) {
		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_PRIORITY]) {
		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_STATE]) {
		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_FLUSH])
		br_fdb_delete_by_port(p->br, p, 0, 0);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);

		err = br_multicast_set_port_router(p, mcast_router);
		if (err)
			return err;
	}
#endif

	if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_MACPAUSE)
			return -EINVAL;
		p->group_fwd_mask = fwd_mask;
	}

	err = br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS,
			       BR_NEIGH_SUPPRESS);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
	if (err)
		return err;

	if (tb[IFLA_BRPORT_BACKUP_PORT]) {
		struct net_device *backup_dev = NULL;
		u32 backup_ifindex;

		backup_ifindex = nla_get_u32(tb[IFLA_BRPORT_BACKUP_PORT]);
		if (backup_ifindex) {
			backup_dev = __dev_get_by_index(dev_net(p->dev),
							backup_ifindex);
			if (!backup_dev)
				return -ENOENT;
		}

		err = nbp_backup_change(p, backup_dev);
		if (err)
			return err;
	}

	br_port_flags_change(p, old_flags ^ p->flags);
	return 0;
}

/* Change state and parameters on port. */
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
	struct net_bridge_port *p;
	struct nlattr *protinfo;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!protinfo && !afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself if the AF_SPEC
	 * is set to see if someone is setting vlan info on the bridge
	 */
	if (!p && !afspec)
		return -EINVAL;

	if (p && protinfo) {
		if (protinfo->nla_type & NLA_F_NESTED) {
			err = nla_parse_nested(tb, IFLA_BRPORT_MAX, protinfo,
					       br_port_policy, NULL);
			if (err)
				return err;

			spin_lock_bh(&p->br->lock);
			err = br_setport(p, tb);
			spin_unlock_bh(&p->br->lock);
		} else {
			/* Binary compatibility with old RSTP */
			if (nla_len(protinfo) < sizeof(u8))
				return -EINVAL;

			spin_lock_bh(&p->br->lock);
			err = br_set_port_state(p, nla_get_u8(protinfo));
			spin_unlock_bh(&p->br->lock);
		}
		if (err)
			goto out;
		changed = true;
	}

	if (afspec)
		err = br_afspec(br, p, afspec, RTM_SETLINK, &changed);

	if (changed)
		br_ifinfo_notify(RTM_NEWLINK, br, p);
out:
	return err;
}

/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself as well */
	if (!p && !(dev->priv_flags & IFF_EBRIDGE))
		return -EINVAL;

	err = br_afspec(br, p, afspec, RTM_DELLINK, &changed);
	if (changed)
		/* Send RTM_NEWLINK because userspace
		 * expects RTM_NEWLINK for vlan dels
		 */
		br_ifinfo_notify(RTM_NEWLINK, br, p);

	return err;
}

static int br_validate(struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
		case htons(ETH_P_8021Q):
		case htons(ETH_P_8021AD):
			break;
		default:
			return -EPROTONOSUPPORT;
		}
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		if (defpvid >= VLAN_VID_MASK)
			return -EINVAL;
	}
#endif

	return 0;
}

static int br_port_slave_changelink(struct net_device *brdev,
				    struct net_device *dev,
				    struct nlattr *tb[],
				    struct nlattr *data[],
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int ret;

	if (!data)
		return 0;

	spin_lock_bh(&br->lock);
	ret = br_setport(br_port_get_rtnl(dev), data);
	spin_unlock_bh(&br->lock);

	return ret;
}

static int br_port_fill_slave_info(struct sk_buff *skb,
				   const struct net_device *brdev,
				   const struct net_device *dev)
{
	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}

static size_t br_port_get_slave_size(const struct net_device *brdev,
				     const struct net_device *dev)
{
	return br_port_info_size();
}

static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
	[IFLA_BR_FORWARD_DELAY]	= { .type = NLA_U32 },
	[IFLA_BR_HELLO_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_MAX_AGE]	= { .type = NLA_U32 },
	[IFLA_BR_AGEING_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_STP_STATE]	= { .type = NLA_U32 },
	[IFLA_BR_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_PROTOCOL]	= { .type = NLA_U16 },
	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_ADDR]	= { .type = NLA_BINARY,
				    .len  = ETH_ALEN },
	[IFLA_BR_MCAST_ROUTER]	= { .type = NLA_U8 },
	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERIER]	= { .type = NLA_U8 },
	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
};

static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
			 struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BR_FORWARD_DELAY]) {
		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_HELLO_TIME]) {
		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_MAX_AGE]) {
		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_AGEING_TIME]) {
		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_STP_STATE]) {
		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);

		br_stp_set_enabled(br, stp_enabled);
	}

	if (data[IFLA_BR_PRIORITY]) {
		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);

		br_stp_set_bridge_priority(br, priority);
	}

	if (data[IFLA_BR_VLAN_FILTERING]) {
		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);

		err = __br_vlan_filter_toggle(br, vlan_filter);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);

		err = __br_vlan_set_proto(br, vlan_proto);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		err = __br_vlan_set_default_pvid(br, defpvid);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);

		err = br_vlan_set_stats(br, vlan_stats);
		if (err)
			return err;
	}
#endif

	if (data[IFLA_BR_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
			return -EINVAL;
		br->group_fwd_mask = fwd_mask;
	}

	if (data[IFLA_BR_GROUP_ADDR]) {
		u8 new_addr[ETH_ALEN];

		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
			return -EINVAL;
		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
		if (!is_link_local_ether_addr(new_addr))
			return -EINVAL;
		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
		    new_addr[5] == 3)		/* 802.1X PAE address */
			return -EINVAL;
		spin_lock_bh(&br->lock);
		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
		spin_unlock_bh(&br->lock);
		br->group_addr_set = true;
		br_recalculate_fwd_mask(br);
	}

	if (data[IFLA_BR_FDB_FLUSH])
		br_fdb_flush(br);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (data[IFLA_BR_MCAST_ROUTER]) {
		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);

		err = br_multicast_set_router(br, multicast_router);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_SNOOPING]) {
		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);

		err = br_multicast_toggle(br, mcast_snooping);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
		u8 val;

		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
		br->multicast_query_use_ifaddr = !!val;
	}

	if (data[IFLA_BR_MCAST_QUERIER]) {
		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);

		err = br_multicast_set_querier(br, mcast_querier);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_HASH_ELASTICITY]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_HASH_ELASTICITY]);

		br->hash_elasticity = val;
	}

	if (data[IFLA_BR_MCAST_HASH_MAX]) {
		u32 hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);

		err = br_multicast_set_hash_max(br, hash_max);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);

		br->multicast_last_member_count = val;
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);

		br->multicast_startup_query_count = val;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);

		br->multicast_last_member_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);

		br->multicast_membership_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);

		br->multicast_querier_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);

		br->multicast_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);

		br->multicast_query_response_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);

		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
		__u8 mcast_stats;

		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
		br->multicast_stats_enabled = !!mcast_stats;
	}

	if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
		__u8 igmp_version;

		igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(br, igmp_version);
		if (err)
			return err;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (data[IFLA_BR_MCAST_MLD_VERSION]) {
		__u8 mld_version;

		mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(br, mld_version);
		if (err)
			return err;
	}
#endif
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);

		br->nf_call_iptables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);

		br->nf_call_ip6tables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);

		br->nf_call_arptables = val ? true : false;
	}
#endif

	return 0;
}

static int br_dev_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	err = register_netdevice(dev);
	if (err)
		return err;

	if (tb[IFLA_ADDRESS]) {
		spin_lock_bh(&br->lock);
		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
		spin_unlock_bh(&br->lock);
	}

	err = br_changelink(dev, tb, data, extack);
	if (err)
		br_dev_delete(dev, NULL);

	return err;
}

static size_t br_get_size(const struct net_device *brdev)
{
	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_AGEING_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_STP_STATE */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_PRIORITY */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_FILTERING */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_VLAN_DEFAULT_PVID */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_ENABLED */
#endif
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_GROUP_FWD_MASK */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_ROOT_ID */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_BRIDGE_ID */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_ROOT_PORT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_ROOT_PATH_COST */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
	       nla_total_size(ETH_ALEN) +	/* IFLA_BR_GROUP_ADDR */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_ROUTER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_SNOOPING */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERY_USE_IFADDR */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERIER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_STATS_ENABLED */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_ELASTICITY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_MAX */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_LAST_MEMBER_CNT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_IGMP_VERSION */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_MLD_VERSION */
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IPTABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IP6TABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_ARPTABLES */
#endif
	       0;
}

static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
	struct net_bridge *br = netdev_priv(brdev);
	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
	u32 hello_time = jiffies_to_clock_t(br->hello_time);
	u32 age_time = jiffies_to_clock_t(br->max_age);
	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
	u32 stp_enabled = br->stp_enabled;
	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
	u8 vlan_enabled = br_vlan_enabled(br->dev);
	u64 clockval;

	clockval = br_timer_value(&br->hello_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->tcn_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->topology_change_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->gc_work.timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &br->bridge_id) ||
	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &br->designated_root) ||
	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
		       br->topology_change_detected) ||
	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, br->vlan_stats_enabled))
		return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
		       br->multicast_query_use_ifaddr) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
		       br->multicast_stats_enabled) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
			br->hash_elasticity) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
			br->multicast_last_member_count) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
			br->multicast_startup_query_count) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
		       br->multicast_igmp_version))
		return -EMSGSIZE;
#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
		       br->multicast_mld_version))
		return -EMSGSIZE;
#endif
	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
		       br->nf_call_iptables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
		       br->nf_call_ip6tables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
		       br->nf_call_arptables ? 1 : 0))
		return -EMSGSIZE;
#endif

	return 0;
}

static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int numvls = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return 0;
	}

	if (vg) {
		/* we need to count all, even placeholder entries */
		list_for_each_entry(v, &vg->vlan_list, vlist)
			numvls++;
	}

	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
	       nla_total_size(sizeof(struct br_mcast_stats)) +
	       nla_total_size(0);
}

static int br_fill_linkxstats(struct sk_buff *skb,
			      const struct net_device *dev,
			      int *prividx, int attr)
{
	struct nlattr *nla __maybe_unused;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	struct nlattr *nest;
	int vl_idx = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return -EINVAL;
	}

	nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
	if (!nest)
		return -EMSGSIZE;

	if (vg) {
		u16 pvid;

		pvid = br_get_pvid(vg);
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			struct bridge_vlan_xstats vxi;
			struct br_vlan_stats stats;

			if (++vl_idx < *prividx)
				continue;
			memset(&vxi, 0, sizeof(vxi));
			vxi.vid = v->vid;
			vxi.flags = v->flags;
			if (v->vid == pvid)
				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
			br_vlan_get_stats(v, &stats);
			vxi.rx_bytes = stats.rx_bytes;
			vxi.rx_packets = stats.rx_packets;
			vxi.tx_bytes = stats.tx_bytes;
			vxi.tx_packets = stats.tx_packets;

			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
				goto nla_put_failure;
		}
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (++vl_idx >= *prividx) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
					sizeof(struct br_mcast_stats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;
		br_multicast_get_stats(br, p, nla_data(nla));
	}
#endif
	nla_nest_end(skb, nest);
	*prividx = 0;

	return 0;

nla_put_failure:
	nla_nest_end(skb, nest);
	*prividx = vl_idx;

	return -EMSGSIZE;
}

static struct rtnl_af_ops br_af_ops __read_mostly = {
	.family			= AF_BRIDGE,
	.get_link_af_size	= br_get_link_af_size_filtered,
};

struct rtnl_link_ops br_link_ops __read_mostly = {
	.kind			= "bridge",
	.priv_size		= sizeof(struct net_bridge),
	.setup			= br_dev_setup,
	.maxtype		= IFLA_BR_MAX,
	.policy			= br_policy,
	.validate		= br_validate,
	.newlink		= br_dev_newlink,
	.changelink		= br_changelink,
	.dellink		= br_dev_delete,
	.get_size		= br_get_size,
	.fill_info		= br_fill_info,
	.fill_linkxstats	= br_fill_linkxstats,
	.get_linkxstats_size	= br_get_linkxstats_size,

	.slave_maxtype		= IFLA_BRPORT_MAX,
	.slave_policy		= br_port_policy,
	.slave_changelink	= br_port_slave_changelink,
	.get_slave_size		= br_port_get_slave_size,
	.fill_slave_info	= br_port_fill_slave_info,
};

int __init br_netlink_init(void)
{
	int err;

	br_mdb_init();
	rtnl_af_register(&br_af_ops);

	err = rtnl_link_register(&br_link_ops);
	if (err)
		goto out_af;

	return 0;

out_af:
	rtnl_af_unregister(&br_af_ops);
	br_mdb_uninit();
	return err;
}

void br_netlink_fini(void)
{
	br_mdb_uninit();
	rtnl_af_unregister(&br_af_ops);
	rtnl_link_unregister(&br_link_ops);
}