/*
 *	Bridge netlink control interface
 *
 *	Authors:
 *	Stephen Hemminger		<shemminger@osdl.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>

#include "br_private.h"
#include "br_private_stp.h"
#include "br_private_tunnel.h"

static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				u32 filter_mask)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int num_vlans = 0;

	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	pvid = br_get_pvid(vg);
	/* Count number of vlan infos */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		/* only a context, bridge vlan not activated */
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			if ((vid_range_end - vid_range_start) > 0)
				num_vlans += 2;
			else
				num_vlans += 1;
		}
initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		if ((vid_range_end - vid_range_start) > 0)
			num_vlans += 2;
		else
			num_vlans += 1;
	}

	return num_vlans;
}

static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				 u32 filter_mask)
{
	int num_vlans;

	if (!vg)
		return 0;

	if (filter_mask & RTEXT_FILTER_BRVLAN)
		return vg->num_vlans;

	rcu_read_lock();
	num_vlans = __get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	return num_vlans;
}

static size_t br_get_link_af_size_filtered(const struct net_device *dev,
					   u32 filter_mask)
{
	struct net_bridge_vlan_group *vg = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br;
	int num_vlan_infos;
	size_t vinfo_sz = 0;

	rcu_read_lock();
	if (br_port_exists(dev)) {
		p = br_port_get_rcu(dev);
		vg = nbp_vlan_group_rcu(p);
	} else if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	if (p && (p->flags & BR_VLAN_TUNNEL))
		vinfo_sz += br_get_vlan_tunnel_info_size(vg);

	/* Each VLAN is returned in bridge_vlan_info along with flags */
	vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));

	return vinfo_sz;
}

static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_BCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
#endif
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_GROUP_FWD_MASK */
		+ 0;
}

static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
		+ nla_total_size(4) /* IFLA_MASTER */
		+ nla_total_size(4) /* IFLA_MTU */
		+ nla_total_size(4) /* IFLA_LINK */
		+ nla_total_size(1) /* IFLA_OPERSTATE */
		+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
		+ nla_total_size(br_get_link_af_size_filtered(dev,
							      filter_mask)); /* IFLA_AF_SPEC */
}

static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
	u64 timerval;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT,
		       !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
		       !!(p->flags & BR_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
		       !!(p->flags & BR_MCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
		       !!(p->flags & BR_BCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
		       !!(p->flags & BR_PROXYARP_WIFI)) ||
	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_root) ||
	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_bridge) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
		       p->topology_change_ack) ||
	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
							BR_VLAN_TUNNEL)) ||
	    nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask))
		return -EMSGSIZE;

	timerval = br_timer_value(&p->message_age_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->forward_delay_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->hold_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
		       p->multicast_router))
		return -EMSGSIZE;
#endif

	return 0;
}

static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
				    u16 vid_end, u16 flags)
{
	struct bridge_vlan_info vinfo;

	if ((vid_end - vid_start) > 0) {
		/* add range to skb */
		vinfo.vid = vid_start;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;

		vinfo.vid = vid_end;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	} else {
		vinfo.vid = vid_start;
		vinfo.flags = flags;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
					 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int err = 0;

	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
	 * and mark vlan info with begin and end flags
	 * if vlaninfo represents a range
	 */
	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
						       vid_range_end,
						       vid_range_flags);
			if (err)
				return err;
		}

initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		/* Call it once more to send any left over vlans */
		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
					       vid_range_end,
					       vid_range_flags);
		if (err)
			return err;
	}

	return 0;
}

static int br_fill_ifvlaninfo(struct sk_buff *skb,
			      struct net_bridge_vlan_group *vg)
{
	struct bridge_vlan_info vinfo;
	struct net_bridge_vlan *v;
	u16 pvid;

	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;

		vinfo.vid = v->vid;
		vinfo.flags = 0;
		if (v->vid == pvid)
			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev)
{
	struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;

	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		 event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest
			= nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);

		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	/* Check if the VID information is requested */
	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		struct net_bridge_vlan_group *vg;
		struct nlattr *af;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;
		}
		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);

		if (port && (port->flags & BR_VLAN_TUNNEL))
			err = br_fill_vlan_tunnel_info(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
		nla_nest_end(skb, af);
	}

done:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * Notify listeners of a change in port information
 */
void br_ifinfo_notify(int event, struct net_bridge_port *port)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;

	if (!port)
		return;

	net = dev_net(port->dev);
%d\n", 469 (unsigned int)port->port_no, port->dev->name, event); 470 471 skb = nlmsg_new(br_nlmsg_size(port->dev, filter), GFP_ATOMIC); 472 if (skb == NULL) 473 goto errout; 474 475 err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, port->dev); 476 if (err < 0) { 477 /* -EMSGSIZE implies BUG in br_nlmsg_size() */ 478 WARN_ON(err == -EMSGSIZE); 479 kfree_skb(skb); 480 goto errout; 481 } 482 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 483 return; 484 errout: 485 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 486 } 487 488 489 /* 490 * Dump information about all ports, in response to GETLINK 491 */ 492 int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, 493 struct net_device *dev, u32 filter_mask, int nlflags) 494 { 495 struct net_bridge_port *port = br_port_get_rtnl(dev); 496 497 if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) && 498 !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) 499 return 0; 500 501 return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags, 502 filter_mask, dev); 503 } 504 505 static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p, 506 int cmd, struct bridge_vlan_info *vinfo) 507 { 508 int err = 0; 509 510 switch (cmd) { 511 case RTM_SETLINK: 512 if (p) { 513 /* if the MASTER flag is set this will act on the global 514 * per-VLAN entry as well 515 */ 516 err = nbp_vlan_add(p, vinfo->vid, vinfo->flags); 517 if (err) 518 break; 519 } else { 520 vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY; 521 err = br_vlan_add(br, vinfo->vid, vinfo->flags); 522 } 523 break; 524 525 case RTM_DELLINK: 526 if (p) { 527 nbp_vlan_delete(p, vinfo->vid); 528 if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER) 529 br_vlan_delete(p->br, vinfo->vid); 530 } else { 531 br_vlan_delete(br, vinfo->vid); 532 } 533 break; 534 } 535 536 return err; 537 } 538 539 static int br_process_vlan_info(struct net_bridge *br, 540 struct net_bridge_port *p, int cmd, 541 struct bridge_vlan_info *vinfo_curr, 542 struct bridge_vlan_info **vinfo_last) 543 { 544 if (!vinfo_curr->vid || vinfo_curr->vid >= VLAN_VID_MASK) 545 return -EINVAL; 546 547 if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) { 548 /* check if we are already processing a range */ 549 if (*vinfo_last) 550 return -EINVAL; 551 *vinfo_last = vinfo_curr; 552 /* don't allow range of pvids */ 553 if ((*vinfo_last)->flags & BRIDGE_VLAN_INFO_PVID) 554 return -EINVAL; 555 return 0; 556 } 557 558 if (*vinfo_last) { 559 struct bridge_vlan_info tmp_vinfo; 560 int v, err; 561 562 if (!(vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END)) 563 return -EINVAL; 564 565 if (vinfo_curr->vid <= (*vinfo_last)->vid) 566 return -EINVAL; 567 568 memcpy(&tmp_vinfo, *vinfo_last, 569 sizeof(struct bridge_vlan_info)); 570 for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) { 571 tmp_vinfo.vid = v; 572 err = br_vlan_info(br, p, cmd, &tmp_vinfo); 573 if (err) 574 break; 575 } 576 *vinfo_last = NULL; 577 578 return 0; 579 } 580 581 return br_vlan_info(br, p, cmd, vinfo_curr); 582 } 583 584 static int br_afspec(struct net_bridge *br, 585 struct net_bridge_port *p, 586 struct nlattr *af_spec, 587 int cmd) 588 { 589 struct bridge_vlan_info *vinfo_curr = NULL; 590 struct bridge_vlan_info *vinfo_last = NULL; 591 struct nlattr *attr; 592 struct vtunnel_info tinfo_last = {}; 593 struct vtunnel_info tinfo_curr = {}; 594 int err = 0, rem; 595 596 nla_for_each_nested(attr, af_spec, rem) { 597 err = 0; 598 switch (nla_type(attr)) { 599 case IFLA_BRIDGE_VLAN_TUNNEL_INFO: 600 if (!p || !(p->flags & BR_VLAN_TUNNEL)) 601 return -EINVAL; 602 err = 
			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
			if (err)
				return err;
			err = br_process_vlan_tunnel_info(br, p, cmd,
							  &tinfo_curr,
							  &tinfo_last);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_VLAN_INFO:
			if (nla_len(attr) != sizeof(struct bridge_vlan_info))
				return -EINVAL;
			vinfo_curr = nla_data(attr);
			err = br_process_vlan_info(br, p, cmd, vinfo_curr,
						   &vinfo_last);
			if (err)
				return err;
			break;
		}
	}

	return err;
}

static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]	= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]	= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE] = { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
};

/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
	if (state > BR_STATE_BLOCKING)
		return -EINVAL;

	/* if kernel STP is running, don't allow changes */
	if (p->br->stp_enabled == BR_KERNEL_STP)
		return -EBUSY;

	/* if device is not up, change is not allowed
	 * if link is not present, only allowable state is disabled
	 */
	if (!netif_running(p->dev) ||
	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
		return -ENETDOWN;

	br_set_state(p, state);
	br_port_state_selection(p->br);
	return 0;
}

/* Set/clear port flags based on attribute */
static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
			    int attrtype, unsigned long mask)
{
	unsigned long flags;
	int err;

	if (!tb[attrtype])
		return 0;

	if (nla_get_u8(tb[attrtype]))
		flags = p->flags | mask;
	else
		flags = p->flags & ~mask;

	err = br_switchdev_set_port_flag(p, flags, mask);
	if (err)
		return err;

	p->flags = flags;
	return 0;
}

/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
{
	unsigned long old_flags = p->flags;
	bool br_vlan_tunnel_old = false;
	int err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST,
			       BR_MULTICAST_TO_UNICAST);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
	if (err)
		return err;

	br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
	err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
	if (err)
		return err;

	if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
		nbp_vlan_tunnel_info_flush(p);

	if (tb[IFLA_BRPORT_COST]) {
		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_PRIORITY]) {
		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_STATE]) {
		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_FLUSH])
		br_fdb_delete_by_port(p->br, p, 0, 0);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);

		err = br_multicast_set_port_router(p, mcast_router);
		if (err)
			return err;
	}
#endif

	if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_MACPAUSE)
			return -EINVAL;
		p->group_fwd_mask = fwd_mask;
	}

	br_port_flags_change(p, old_flags ^ p->flags);
	return 0;
}

/* Change state and parameters on port. */
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *protinfo;
	struct nlattr *afspec;
	struct net_bridge_port *p;
	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!protinfo && !afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself if the AF_SPEC
	 * is set to see if someone is setting vlan info on the bridge
	 */
	if (!p && !afspec)
		return -EINVAL;

	if (p && protinfo) {
		if (protinfo->nla_type & NLA_F_NESTED) {
			err = nla_parse_nested(tb, IFLA_BRPORT_MAX, protinfo,
					       br_port_policy, NULL);
			if (err)
				return err;

			spin_lock_bh(&p->br->lock);
			err = br_setport(p, tb);
			spin_unlock_bh(&p->br->lock);
		} else {
			/* Binary compatibility with old RSTP */
			if (nla_len(protinfo) < sizeof(u8))
				return -EINVAL;

			spin_lock_bh(&p->br->lock);
			err = br_set_port_state(p, nla_get_u8(protinfo));
			spin_unlock_bh(&p->br->lock);
		}
		if (err)
			goto out;
	}

	if (afspec) {
		err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
				afspec, RTM_SETLINK);
	}

	if (err == 0)
		br_ifinfo_notify(RTM_NEWLINK, p);
out:
	return err;
}

/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *afspec;
	struct net_bridge_port *p;
	int err = 0;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself as well */
	if (!p && !(dev->priv_flags & IFF_EBRIDGE))
		return -EINVAL;

	err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
			afspec, RTM_DELLINK);
	if (err == 0)
		/* Send RTM_NEWLINK because userspace
		 * expects RTM_NEWLINK for vlan dels
		 */
		br_ifinfo_notify(RTM_NEWLINK, p);

	return err;
}

static int br_validate(struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
		case htons(ETH_P_8021Q):
		case htons(ETH_P_8021AD):
			break;
		default:
			return -EPROTONOSUPPORT;
		}
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		if (defpvid >= VLAN_VID_MASK)
			return -EINVAL;
	}
#endif

	return 0;
}

static int br_port_slave_changelink(struct net_device *brdev,
				    struct net_device *dev,
				    struct nlattr *tb[],
				    struct nlattr *data[],
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int ret;

	if (!data)
		return 0;

	spin_lock_bh(&br->lock);
	ret = br_setport(br_port_get_rtnl(dev), data);
	spin_unlock_bh(&br->lock);

	return ret;
}

static int br_port_fill_slave_info(struct sk_buff *skb,
				   const struct net_device *brdev,
				   const struct net_device *dev)
{
	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}

static size_t br_port_get_slave_size(const struct net_device *brdev,
				     const struct net_device *dev)
{
	return br_port_info_size();
}

static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
	[IFLA_BR_FORWARD_DELAY]	= { .type = NLA_U32 },
	[IFLA_BR_HELLO_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_MAX_AGE]	= { .type = NLA_U32 },
	[IFLA_BR_AGEING_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_STP_STATE]	= { .type = NLA_U32 },
	[IFLA_BR_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_PROTOCOL]	= { .type = NLA_U16 },
	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_ADDR]	= { .type = NLA_BINARY,
				    .len  = ETH_ALEN },
	[IFLA_BR_MCAST_ROUTER]	= { .type = NLA_U8 },
	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERIER]	= { .type = NLA_U8 },
	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
};

static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
			 struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BR_FORWARD_DELAY]) {
		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_HELLO_TIME]) {
		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_MAX_AGE]) {
		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_AGEING_TIME]) {
		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_STP_STATE]) {
		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);

		br_stp_set_enabled(br, stp_enabled);
	}

	if (data[IFLA_BR_PRIORITY]) {
		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);

		br_stp_set_bridge_priority(br, priority);
	}

	if (data[IFLA_BR_VLAN_FILTERING]) {
		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);

		err = __br_vlan_filter_toggle(br, vlan_filter);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);

		err = __br_vlan_set_proto(br, vlan_proto);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		err = __br_vlan_set_default_pvid(br, defpvid);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);

		err = br_vlan_set_stats(br, vlan_stats);
		if (err)
			return err;
	}
#endif

	if (data[IFLA_BR_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
			return -EINVAL;
		br->group_fwd_mask = fwd_mask;
	}

	if (data[IFLA_BR_GROUP_ADDR]) {
		u8 new_addr[ETH_ALEN];

		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
			return -EINVAL;
		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
		if (!is_link_local_ether_addr(new_addr))
			return -EINVAL;
		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
		    new_addr[5] == 3)		/* 802.1X PAE address */
			return -EINVAL;
		spin_lock_bh(&br->lock);
		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
		spin_unlock_bh(&br->lock);
		br->group_addr_set = true;
		br_recalculate_fwd_mask(br);
	}

	if (data[IFLA_BR_FDB_FLUSH])
		br_fdb_flush(br);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (data[IFLA_BR_MCAST_ROUTER]) {
		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);

		err = br_multicast_set_router(br, multicast_router);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_SNOOPING]) {
		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);

		err = br_multicast_toggle(br, mcast_snooping);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
		u8 val;

		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
		br->multicast_query_use_ifaddr = !!val;
	}

	if (data[IFLA_BR_MCAST_QUERIER]) {
		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);

		err = br_multicast_set_querier(br, mcast_querier);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_HASH_ELASTICITY]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_HASH_ELASTICITY]);

		br->hash_elasticity = val;
	}

	if (data[IFLA_BR_MCAST_HASH_MAX]) {
		u32 hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);

		err = br_multicast_set_hash_max(br, hash_max);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);

		br->multicast_last_member_count = val;
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);

		br->multicast_startup_query_count = val;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);

		br->multicast_last_member_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);

		br->multicast_membership_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);

		br->multicast_querier_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);

		br->multicast_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);

		br->multicast_query_response_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);

		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
		__u8 mcast_stats;

		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
		br->multicast_stats_enabled = !!mcast_stats;
	}

	if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
		__u8 igmp_version;

		igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(br, igmp_version);
		if (err)
			return err;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (data[IFLA_BR_MCAST_MLD_VERSION]) {
		__u8 mld_version;

		mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(br, mld_version);
		if (err)
			return err;
	}
#endif
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);

		br->nf_call_iptables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);

		br->nf_call_ip6tables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);

		br->nf_call_arptables = val ? true : false;
	}
#endif

	return 0;
}

static int br_dev_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	if (tb[IFLA_ADDRESS]) {
		spin_lock_bh(&br->lock);
		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
		spin_unlock_bh(&br->lock);
	}

	err = register_netdevice(dev);
	if (err)
		return err;

	err = br_changelink(dev, tb, data, extack);
	if (err)
		unregister_netdevice(dev);
	return err;
}

static size_t br_get_size(const struct net_device *brdev)
{
	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_AGEING_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_STP_STATE */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_PRIORITY */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_FILTERING */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_VLAN_DEFAULT_PVID */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_ENABLED */
#endif
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_GROUP_FWD_MASK */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_ROOT_ID */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_BRIDGE_ID */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_ROOT_PORT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_ROOT_PATH_COST */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
	       nla_total_size(ETH_ALEN) +	/* IFLA_BR_GROUP_ADDR */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_ROUTER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_SNOOPING */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERY_USE_IFADDR */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERIER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_STATS_ENABLED */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_ELASTICITY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_MAX */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_LAST_MEMBER_CNT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_IGMP_VERSION */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_MLD_VERSION */
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IPTABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IP6TABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_ARPTABLES */
#endif
	       0;
}

static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
	struct net_bridge *br = netdev_priv(brdev);
	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
	u32 hello_time = jiffies_to_clock_t(br->hello_time);
	u32 age_time = jiffies_to_clock_t(br->max_age);
	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
	u32 stp_enabled = br->stp_enabled;
	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
	u8 vlan_enabled = br_vlan_enabled(br->dev);
	u64 clockval;

	clockval = br_timer_value(&br->hello_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->tcn_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->topology_change_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->gc_work.timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &br->bridge_id) ||
	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &br->designated_root) ||
	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
		       br->topology_change_detected) ||
	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, br->vlan_stats_enabled))
		return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
		       br->multicast_query_use_ifaddr) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
		       br->multicast_stats_enabled) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
			br->hash_elasticity) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
			br->multicast_last_member_count) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
			br->multicast_startup_query_count) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
		       br->multicast_igmp_version))
		return -EMSGSIZE;
#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
		       br->multicast_mld_version))
		return -EMSGSIZE;
#endif
	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
		       br->nf_call_iptables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
		       br->nf_call_ip6tables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
		       br->nf_call_arptables ? 1 : 0))
		return -EMSGSIZE;
#endif

	return 0;
}

static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int numvls = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return 0;
	}

	if (vg) {
		/* we need to count all, even placeholder entries */
		list_for_each_entry(v, &vg->vlan_list, vlist)
			numvls++;
	}

	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
	       nla_total_size(sizeof(struct br_mcast_stats)) +
	       nla_total_size(0);
}

static int br_fill_linkxstats(struct sk_buff *skb,
			      const struct net_device *dev,
			      int *prividx, int attr)
{
	struct nlattr *nla __maybe_unused;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	struct nlattr *nest;
	int vl_idx = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return -EINVAL;
	}

	nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
	if (!nest)
		return -EMSGSIZE;

	if (vg) {
		u16 pvid;

		pvid = br_get_pvid(vg);
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			struct bridge_vlan_xstats vxi;
			struct br_vlan_stats stats;

			if (++vl_idx < *prividx)
				continue;
			memset(&vxi, 0, sizeof(vxi));
			vxi.vid = v->vid;
			vxi.flags = v->flags;
			if (v->vid == pvid)
				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
			br_vlan_get_stats(v, &stats);
			vxi.rx_bytes = stats.rx_bytes;
			vxi.rx_packets = stats.rx_packets;
			vxi.tx_bytes = stats.tx_bytes;
			vxi.tx_packets = stats.tx_packets;

			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
				goto nla_put_failure;
		}
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (++vl_idx >= *prividx) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
					sizeof(struct br_mcast_stats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;
		br_multicast_get_stats(br, p, nla_data(nla));
	}
#endif
	nla_nest_end(skb, nest);
	*prividx = 0;

	return 0;

nla_put_failure:
	nla_nest_end(skb, nest);
	*prividx = vl_idx;

	return -EMSGSIZE;
}

static struct rtnl_af_ops br_af_ops __read_mostly = {
	.family			= AF_BRIDGE,
	.get_link_af_size	= br_get_link_af_size_filtered,
};

struct rtnl_link_ops br_link_ops __read_mostly = {
	.kind			= "bridge",
	.priv_size		= sizeof(struct net_bridge),
	.setup			= br_dev_setup,
	.maxtype		= IFLA_BR_MAX,
	.policy			= br_policy,
	.validate		= br_validate,
	.newlink		= br_dev_newlink,
	.changelink		= br_changelink,
	.dellink		= br_dev_delete,
	.get_size		= br_get_size,
	.fill_info		= br_fill_info,
	.fill_linkxstats	= br_fill_linkxstats,
	.get_linkxstats_size	= br_get_linkxstats_size,

	.slave_maxtype		= IFLA_BRPORT_MAX,
	.slave_policy		= br_port_policy,
	.slave_changelink	= br_port_slave_changelink,
	.get_slave_size		= br_port_get_slave_size,
	.fill_slave_info	= br_port_fill_slave_info,
};

int __init br_netlink_init(void)
{
	int err;

	br_mdb_init();
	rtnl_af_register(&br_af_ops);

	err = rtnl_link_register(&br_link_ops);
	if (err)
		goto out_af;

	return 0;

out_af:
	rtnl_af_unregister(&br_af_ops);
	br_mdb_uninit();
	return err;
}

void br_netlink_fini(void)
{
	br_mdb_uninit();
	rtnl_af_unregister(&br_af_ops);
	rtnl_link_unregister(&br_link_ops);
}