/*
 *	Bridge netlink control interface
 *
 *	Authors:
 *	Stephen Hemminger		<shemminger@osdl.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>

#include "br_private.h"
#include "br_private_stp.h"
#include "br_private_tunnel.h"

static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				u32 filter_mask)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int num_vlans = 0;

	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	pvid = br_get_pvid(vg);
	/* Count number of vlan infos */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		/* only a context, bridge vlan not activated */
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			if ((vid_range_end - vid_range_start) > 0)
				num_vlans += 2;
			else
				num_vlans += 1;
		}
initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		if ((vid_range_end - vid_range_start) > 0)
			num_vlans += 2;
		else
			num_vlans += 1;
	}

	return num_vlans;
}

static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				 u32 filter_mask)
{
	int num_vlans;

	if (!vg)
		return 0;

	if (filter_mask & RTEXT_FILTER_BRVLAN)
		return vg->num_vlans;

	rcu_read_lock();
	num_vlans = __get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	return num_vlans;
}

static size_t br_get_link_af_size_filtered(const struct net_device *dev,
					   u32 filter_mask)
{
	struct net_bridge_vlan_group *vg = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br;
	int num_vlan_infos;
	size_t vinfo_sz = 0;

	rcu_read_lock();
	if (br_port_exists(dev)) {
		p = br_port_get_rcu(dev);
		vg = nbp_vlan_group_rcu(p);
	} else if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	if (p && (p->flags & BR_VLAN_TUNNEL))
		vinfo_sz += br_get_vlan_tunnel_info_size(vg);

	/* Each VLAN is returned in bridge_vlan_info along with flags */
	vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));

	return vinfo_sz;
}
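
/* Worst-case attribute space needed for one port's IFLA_PROTINFO nest;
 * used by br_nlmsg_size() and reported through ->get_slave_size().
 */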
static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_BCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
#endif
		+ 0;
}

static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
		+ nla_total_size(4) /* IFLA_MASTER */
		+ nla_total_size(4) /* IFLA_MTU */
		+ nla_total_size(4) /* IFLA_LINK */
		+ nla_total_size(1) /* IFLA_OPERSTATE */
		+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
		+ nla_total_size(br_get_link_af_size_filtered(dev,
				 filter_mask)); /* IFLA_AF_SPEC */
}

static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
	u64 timerval;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT,
		       !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
		       !!(p->flags & BR_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
		       !!(p->flags & BR_MCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
		       !!(p->flags & BR_BCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
		       !!(p->flags & BR_PROXYARP_WIFI)) ||
	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_root) ||
	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_bridge) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
		       p->topology_change_ack) ||
	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
							BR_VLAN_TUNNEL)))
		return -EMSGSIZE;

	timerval = br_timer_value(&p->message_age_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->forward_delay_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->hold_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
		       p->multicast_router))
		return -EMSGSIZE;
#endif

	return 0;
}
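
/* Emit one bridge_vlan_info attribute, or a RANGE_BEGIN/RANGE_END pair
 * when [vid_start, vid_end] covers more than one VLAN.
 */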
static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
				    u16 vid_end, u16 flags)
{
	struct bridge_vlan_info vinfo;

	if ((vid_end - vid_start) > 0) {
		/* add range to skb */
		vinfo.vid = vid_start;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;

		vinfo.vid = vid_end;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	} else {
		vinfo.vid = vid_start;
		vinfo.flags = flags;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
					 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int err = 0;

	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
	 * and mark vlan info with begin and end flags
	 * if vlaninfo represents a range
	 */
	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
						       vid_range_end,
						       vid_range_flags);
			if (err)
				return err;
		}

initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		/* Call it once more to send any left over vlans */
		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
					       vid_range_end,
					       vid_range_flags);
		if (err)
			return err;
	}

	return 0;
}
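
/* Uncompressed dump: one IFLA_BRIDGE_VLAN_INFO attribute per VLAN, used
 * when RTEXT_FILTER_BRVLAN is requested instead of the compressed variant.
 */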
static int br_fill_ifvlaninfo(struct sk_buff *skb,
			      struct net_bridge_vlan_group *vg)
{
	struct bridge_vlan_info vinfo;
	struct net_bridge_vlan *v;
	u16 pvid;

	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;

		vinfo.vid = v->vid;
		vinfo.flags = 0;
		if (v->vid == pvid)
			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev)
{
	struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;

	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		 event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest
			= nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);

		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	/* Check if the VID information is requested */
	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		struct net_bridge_vlan_group *vg;
		struct nlattr *af;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;
		}
		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);

		if (port && (port->flags & BR_VLAN_TUNNEL))
			err = br_fill_vlan_tunnel_info(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
		nla_nest_end(skb, af);
	}

done:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * Notify listeners of a change in port information
 */
void br_ifinfo_notify(int event, struct net_bridge_port *port)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;

	if (!port)
		return;

	net = dev_net(port->dev);
	br_debug(port->br, "port %u(%s) event %d\n",
		 (unsigned int)port->port_no, port->dev->name, event);

	skb = nlmsg_new(br_nlmsg_size(port->dev, filter), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, port->dev);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}


/*
 * Dump information about all ports, in response to GETLINK
 */
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
	       struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct net_bridge_port *port = br_port_get_rtnl(dev);

	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
			      filter_mask, dev);
}

static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
			int cmd, struct bridge_vlan_info *vinfo)
{
	int err = 0;

	switch (cmd) {
	case RTM_SETLINK:
		if (p) {
			/* if the MASTER flag is set this will act on the global
			 * per-VLAN entry as well
			 */
			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
			if (err)
				break;
		} else {
			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, vinfo->vid, vinfo->flags);
		}
		break;

	case RTM_DELLINK:
		if (p) {
			nbp_vlan_delete(p, vinfo->vid);
			if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
				br_vlan_delete(p->br, vinfo->vid);
		} else {
			br_vlan_delete(br, vinfo->vid);
		}
		break;
	}

	return err;
}

static int br_process_vlan_info(struct net_bridge *br,
				struct net_bridge_port *p, int cmd,
				struct bridge_vlan_info *vinfo_curr,
				struct bridge_vlan_info **vinfo_last)
{
	if (!vinfo_curr->vid || vinfo_curr->vid >= VLAN_VID_MASK)
		return -EINVAL;

	if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
		/* check if we are already processing a range */
		if (*vinfo_last)
			return -EINVAL;
		*vinfo_last = vinfo_curr;
		/* don't allow range of pvids */
		if ((*vinfo_last)->flags & BRIDGE_VLAN_INFO_PVID)
			return -EINVAL;
		return 0;
	}

	if (*vinfo_last) {
		struct bridge_vlan_info tmp_vinfo;
		int v, err;

		if (!(vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END))
			return -EINVAL;

		if (vinfo_curr->vid <= (*vinfo_last)->vid)
			return -EINVAL;

		memcpy(&tmp_vinfo, *vinfo_last,
		       sizeof(struct bridge_vlan_info));
		for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
			tmp_vinfo.vid = v;
			err = br_vlan_info(br, p, cmd, &tmp_vinfo);
			if (err)
				break;
		}
		*vinfo_last = NULL;

		return 0;
	}

	return br_vlan_info(br, p, cmd, vinfo_curr);
}
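
/* Walk the IFLA_AF_SPEC nest of a SETLINK/DELLINK request and apply each
 * IFLA_BRIDGE_VLAN_INFO and IFLA_BRIDGE_VLAN_TUNNEL_INFO attribute in order.
 */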
static int br_afspec(struct net_bridge *br,
		     struct net_bridge_port *p,
		     struct nlattr *af_spec,
		     int cmd)
{
	struct bridge_vlan_info *vinfo_curr = NULL;
	struct bridge_vlan_info *vinfo_last = NULL;
	struct nlattr *attr;
	struct vtunnel_info tinfo_last = {};
	struct vtunnel_info tinfo_curr = {};
	int err = 0, rem;

	nla_for_each_nested(attr, af_spec, rem) {
		err = 0;
		switch (nla_type(attr)) {
		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
			if (!p || !(p->flags & BR_VLAN_TUNNEL))
				return -EINVAL;
			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
			if (err)
				return err;
			err = br_process_vlan_tunnel_info(br, p, cmd,
							  &tinfo_curr,
							  &tinfo_last);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_VLAN_INFO:
			if (nla_len(attr) != sizeof(struct bridge_vlan_info))
				return -EINVAL;
			vinfo_curr = nla_data(attr);
			err = br_process_vlan_info(br, p, cmd, vinfo_curr,
						   &vinfo_last);
			if (err)
				return err;
			break;
		}
	}

	return err;
}

static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]	= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]	= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE] = { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
};

/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
	if (state > BR_STATE_BLOCKING)
		return -EINVAL;

	/* if kernel STP is running, don't allow changes */
	if (p->br->stp_enabled == BR_KERNEL_STP)
		return -EBUSY;

	/* if device is not up, change is not allowed
	 * if link is not present, only allowable state is disabled
	 */
	if (!netif_running(p->dev) ||
	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
		return -ENETDOWN;

	br_set_state(p, state);
	br_port_state_selection(p->br);
	return 0;
}

/* Set/clear port flags based on the corresponding netlink attribute */
static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
			    int attrtype, unsigned long mask)
{
	unsigned long flags;
	int err;

	if (!tb[attrtype])
		return 0;

	if (nla_get_u8(tb[attrtype]))
		flags = p->flags | mask;
	else
		flags = p->flags & ~mask;

	err = br_switchdev_set_port_flag(p, flags, mask);
	if (err)
		return err;

	p->flags = flags;
	return 0;
}

/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
{
	unsigned long old_flags = p->flags;
	bool br_vlan_tunnel_old = false;
	int err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
	if (err)
		return err;

	br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
	err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
	if (err)
		return err;

	if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
		nbp_vlan_tunnel_info_flush(p);

	if (tb[IFLA_BRPORT_COST]) {
		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_PRIORITY]) {
		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_STATE]) {
		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_FLUSH])
		br_fdb_delete_by_port(p->br, p, 0, 0);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);

		err = br_multicast_set_port_router(p, mcast_router);
		if (err)
			return err;
	}
#endif
	br_port_flags_change(p, old_flags ^ p->flags);
	return 0;
}

/* Change state and parameters on port. */
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *protinfo;
	struct nlattr *afspec;
	struct net_bridge_port *p;
	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!protinfo && !afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself if the AF_SPEC
	 * is set to see if someone is setting vlan info on the bridge
	 */
	if (!p && !afspec)
		return -EINVAL;

	if (p && protinfo) {
		if (protinfo->nla_type & NLA_F_NESTED) {
			err = nla_parse_nested(tb, IFLA_BRPORT_MAX, protinfo,
					       br_port_policy, NULL);
			if (err)
				return err;

			spin_lock_bh(&p->br->lock);
			err = br_setport(p, tb);
			spin_unlock_bh(&p->br->lock);
		} else {
			/* Binary compatibility with old RSTP */
			if (nla_len(protinfo) < sizeof(u8))
				return -EINVAL;

			spin_lock_bh(&p->br->lock);
			err = br_set_port_state(p, nla_get_u8(protinfo));
			spin_unlock_bh(&p->br->lock);
		}
		if (err)
			goto out;
	}

	if (afspec) {
		err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
				afspec, RTM_SETLINK);
	}

	if (err == 0)
		br_ifinfo_notify(RTM_NEWLINK, p);
out:
	return err;
}

/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *afspec;
	struct net_bridge_port *p;
	int err = 0;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself as well */
	if (!p && !(dev->priv_flags & IFF_EBRIDGE))
		return -EINVAL;

	err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
			afspec, RTM_DELLINK);
	if (err == 0)
		/* Send RTM_NEWLINK because userspace
		 * expects RTM_NEWLINK for vlan dels
		 */
		br_ifinfo_notify(RTM_NEWLINK, p);

	return err;
}
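
/* Validate IFLA_ADDRESS and the VLAN-related IFLA_BR_* attributes before a
 * bridge device is created or reconfigured via rtnetlink.
 */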
static int br_validate(struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
		case htons(ETH_P_8021Q):
		case htons(ETH_P_8021AD):
			break;
		default:
			return -EPROTONOSUPPORT;
		}
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		if (defpvid >= VLAN_VID_MASK)
			return -EINVAL;
	}
#endif

	return 0;
}

static int br_port_slave_changelink(struct net_device *brdev,
				    struct net_device *dev,
				    struct nlattr *tb[],
				    struct nlattr *data[],
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int ret;

	if (!data)
		return 0;

	spin_lock_bh(&br->lock);
	ret = br_setport(br_port_get_rtnl(dev), data);
	spin_unlock_bh(&br->lock);

	return ret;
}

static int br_port_fill_slave_info(struct sk_buff *skb,
				   const struct net_device *brdev,
				   const struct net_device *dev)
{
	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}

static size_t br_port_get_slave_size(const struct net_device *brdev,
				     const struct net_device *dev)
{
	return br_port_info_size();
}

static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
	[IFLA_BR_FORWARD_DELAY]	= { .type = NLA_U32 },
	[IFLA_BR_HELLO_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_MAX_AGE]	= { .type = NLA_U32 },
	[IFLA_BR_AGEING_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_STP_STATE]	= { .type = NLA_U32 },
	[IFLA_BR_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_PROTOCOL]	= { .type = NLA_U16 },
	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_ADDR]	= { .type = NLA_BINARY,
				    .len  = ETH_ALEN },
	[IFLA_BR_MCAST_ROUTER]	= { .type = NLA_U8 },
	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
};
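
/* rtnl ->changelink(): apply IFLA_BR_* attributes one at a time; the first
 * failure returns immediately, leaving earlier changes in place.
 */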
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
			 struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BR_FORWARD_DELAY]) {
		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_HELLO_TIME]) {
		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_MAX_AGE]) {
		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_AGEING_TIME]) {
		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_STP_STATE]) {
		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);

		br_stp_set_enabled(br, stp_enabled);
	}

	if (data[IFLA_BR_PRIORITY]) {
		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);

		br_stp_set_bridge_priority(br, priority);
	}

	if (data[IFLA_BR_VLAN_FILTERING]) {
		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);

		err = __br_vlan_filter_toggle(br, vlan_filter);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);

		err = __br_vlan_set_proto(br, vlan_proto);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		err = __br_vlan_set_default_pvid(br, defpvid);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);

		err = br_vlan_set_stats(br, vlan_stats);
		if (err)
			return err;
	}
#endif

	if (data[IFLA_BR_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
			return -EINVAL;
		br->group_fwd_mask = fwd_mask;
	}

	if (data[IFLA_BR_GROUP_ADDR]) {
		u8 new_addr[ETH_ALEN];

		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
			return -EINVAL;
		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
		if (!is_link_local_ether_addr(new_addr))
			return -EINVAL;
		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
		    new_addr[5] == 3)		/* 802.1X PAE address */
			return -EINVAL;
		spin_lock_bh(&br->lock);
		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
		spin_unlock_bh(&br->lock);
		br->group_addr_set = true;
		br_recalculate_fwd_mask(br);
	}

	if (data[IFLA_BR_FDB_FLUSH])
		br_fdb_flush(br);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (data[IFLA_BR_MCAST_ROUTER]) {
		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);

		err = br_multicast_set_router(br, multicast_router);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_SNOOPING]) {
		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);

		err = br_multicast_toggle(br, mcast_snooping);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
		u8 val;

		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
		br->multicast_query_use_ifaddr = !!val;
	}

	if (data[IFLA_BR_MCAST_QUERIER]) {
		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);

		err = br_multicast_set_querier(br, mcast_querier);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_HASH_ELASTICITY]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_HASH_ELASTICITY]);

		br->hash_elasticity = val;
	}

	if (data[IFLA_BR_MCAST_HASH_MAX]) {
		u32 hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);

		err = br_multicast_set_hash_max(br, hash_max);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);

		br->multicast_last_member_count = val;
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);

		br->multicast_startup_query_count = val;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);

		br->multicast_last_member_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);

		br->multicast_membership_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);

		br->multicast_querier_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);

		br->multicast_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);

		br->multicast_query_response_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);

		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
		__u8 mcast_stats;

		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
		br->multicast_stats_enabled = !!mcast_stats;
	}

	if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
		__u8 igmp_version;

		igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(br, igmp_version);
		if (err)
			return err;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (data[IFLA_BR_MCAST_MLD_VERSION]) {
		__u8 mld_version;

		mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(br, mld_version);
		if (err)
			return err;
	}
#endif
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);

		br->nf_call_iptables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);

		br->nf_call_ip6tables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);

		br->nf_call_arptables = val ? true : false;
	}
#endif

	return 0;
}
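
/* rtnl ->newlink(): register the bridge device, then apply the remaining
 * attributes via br_changelink(); unregister again if that fails.
 */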
static int br_dev_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	if (tb[IFLA_ADDRESS]) {
		spin_lock_bh(&br->lock);
		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
		spin_unlock_bh(&br->lock);
	}

	err = register_netdevice(dev);
	if (err)
		return err;

	err = br_changelink(dev, tb, data, extack);
	if (err)
		unregister_netdevice(dev);
	return err;
}

static size_t br_get_size(const struct net_device *brdev)
{
	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_AGEING_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_STP_STATE */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_PRIORITY */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_FILTERING */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_VLAN_DEFAULT_PVID */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_ENABLED */
#endif
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_GROUP_FWD_MASK */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_ROOT_ID */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_BRIDGE_ID */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_ROOT_PORT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_ROOT_PATH_COST */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
	       nla_total_size(ETH_ALEN) +	/* IFLA_BR_GROUP_ADDR */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_ROUTER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_SNOOPING */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERY_USE_IFADDR */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERIER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_STATS_ENABLED */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_ELASTICITY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_MAX */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_LAST_MEMBER_CNT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_IGMP_VERSION */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_MLD_VERSION */
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IPTABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IP6TABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_ARPTABLES */
#endif
	       0;
}
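
/* Fill the IFLA_BR_* attributes for a bridge link dump; the attribute set
 * corresponds to the size estimate returned by br_get_size() above.
 */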
static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
	struct net_bridge *br = netdev_priv(brdev);
	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
	u32 hello_time = jiffies_to_clock_t(br->hello_time);
	u32 age_time = jiffies_to_clock_t(br->max_age);
	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
	u32 stp_enabled = br->stp_enabled;
	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
	u8 vlan_enabled = br_vlan_enabled(br->dev);
	u64 clockval;

	clockval = br_timer_value(&br->hello_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->tcn_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->topology_change_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->gc_work.timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &br->bridge_id) ||
	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &br->designated_root) ||
	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
		       br->topology_change_detected) ||
	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, br->vlan_stats_enabled))
		return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
		       br->multicast_query_use_ifaddr) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
		       br->multicast_stats_enabled) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
			br->hash_elasticity) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
			br->multicast_last_member_count) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
			br->multicast_startup_query_count) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
		       br->multicast_igmp_version))
		return -EMSGSIZE;
#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
		       br->multicast_mld_version))
		return -EMSGSIZE;
#endif
	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
		       br->nf_call_iptables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
		       br->nf_call_ip6tables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
		       br->nf_call_arptables ? 1 : 0))
		return -EMSGSIZE;
#endif

	return 0;
}

static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int numvls = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return 0;
	}

	if (vg) {
		/* we need to count all, even placeholder entries */
		list_for_each_entry(v, &vg->vlan_list, vlist)
			numvls++;
	}

	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
	       nla_total_size(sizeof(struct br_mcast_stats)) +
	       nla_total_size(0);
}

static int br_fill_linkxstats(struct sk_buff *skb,
			      const struct net_device *dev,
			      int *prividx, int attr)
{
	struct nlattr *nla __maybe_unused;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	struct nlattr *nest;
	int vl_idx = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return -EINVAL;
	}

	nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
	if (!nest)
		return -EMSGSIZE;

	if (vg) {
		u16 pvid;

		pvid = br_get_pvid(vg);
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			struct bridge_vlan_xstats vxi;
			struct br_vlan_stats stats;

			if (++vl_idx < *prividx)
				continue;
			memset(&vxi, 0, sizeof(vxi));
			vxi.vid = v->vid;
			vxi.flags = v->flags;
			if (v->vid == pvid)
				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
			br_vlan_get_stats(v, &stats);
			vxi.rx_bytes = stats.rx_bytes;
			vxi.rx_packets = stats.rx_packets;
			vxi.tx_bytes = stats.tx_bytes;
			vxi.tx_packets = stats.tx_packets;

			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
				goto nla_put_failure;
		}
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (++vl_idx >= *prividx) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
					sizeof(struct br_mcast_stats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;
		br_multicast_get_stats(br, p, nla_data(nla));
	}
#endif
	nla_nest_end(skb, nest);
	*prividx = 0;

	return 0;

nla_put_failure:
	nla_nest_end(skb, nest);
	*prividx = vl_idx;

	return -EMSGSIZE;
}
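
/* AF_BRIDGE address-family ops: only the per-link AF_SPEC size callback is
 * provided here.
 */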
static struct rtnl_af_ops br_af_ops __read_mostly = {
	.family			= AF_BRIDGE,
	.get_link_af_size	= br_get_link_af_size_filtered,
};

struct rtnl_link_ops br_link_ops __read_mostly = {
	.kind			= "bridge",
	.priv_size		= sizeof(struct net_bridge),
	.setup			= br_dev_setup,
	.maxtype		= IFLA_BR_MAX,
	.policy			= br_policy,
	.validate		= br_validate,
	.newlink		= br_dev_newlink,
	.changelink		= br_changelink,
	.dellink		= br_dev_delete,
	.get_size		= br_get_size,
	.fill_info		= br_fill_info,
	.fill_linkxstats	= br_fill_linkxstats,
	.get_linkxstats_size	= br_get_linkxstats_size,

	.slave_maxtype		= IFLA_BRPORT_MAX,
	.slave_policy		= br_port_policy,
	.slave_changelink	= br_port_slave_changelink,
	.get_slave_size		= br_port_get_slave_size,
	.fill_slave_info	= br_port_fill_slave_info,
};

int __init br_netlink_init(void)
{
	int err;

	br_mdb_init();
	rtnl_af_register(&br_af_ops);

	err = rtnl_link_register(&br_link_ops);
	if (err)
		goto out_af;

	return 0;

out_af:
	rtnl_af_unregister(&br_af_ops);
	br_mdb_uninit();
	return err;
}

void br_netlink_fini(void)
{
	br_mdb_uninit();
	rtnl_af_unregister(&br_af_ops);
	rtnl_link_unregister(&br_link_ops);
}