// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020, Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/ip_tunnels.h>

#include "br_private.h"
#include "br_private_tunnel.h"

/* Dump the vlan's tunnel mapping as a BRIDGE_VLANDB_ENTRY_TUNNEL_INFO nest.
 * Returns true on success or when the vlan has no tunnel mapping (nothing to
 * dump), false if the skb ran out of space (caller should treat as -EMSGSIZE).
 */
static bool __vlan_tun_put(struct sk_buff *skb, const struct net_bridge_vlan *v)
{
	__be32 tid = tunnel_id_to_key32(v->tinfo.tunnel_id);
	struct nlattr *nest;

	/* no tunnel destination means no mapping is configured for this vlan */
	if (!v->tinfo.tunnel_dst)
		return true;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_TUNNEL_INFO);
	if (!nest)
		return false;
	if (nla_put_u32(skb, BRIDGE_VLANDB_TINFO_ID, be32_to_cpu(tid))) {
		/* roll back the partially-built nest so the skb stays valid */
		nla_nest_cancel(skb, nest);
		return false;
	}
	nla_nest_end(skb, nest);

	return true;
}

/* Two vlans can share a dump range only if neither has a tunnel mapping, or
 * their tunnel ids are consecutive (vlan ranges map to tunnel id ranges).
 */
static bool __vlan_tun_can_enter_range(const struct net_bridge_vlan *v_curr,
				       const struct net_bridge_vlan *range_end)
{
	return (!v_curr->tinfo.tunnel_dst && !range_end->tinfo.tunnel_dst) ||
	       vlan_tunid_inrange(v_curr, range_end);
}

/* check if the options' state of v_curr allow it to enter the range */
bool br_vlan_opts_eq_range(const struct net_bridge_vlan *v_curr,
			   const struct net_bridge_vlan *range_end)
{
	return v_curr->state == range_end->state &&
	       __vlan_tun_can_enter_range(v_curr, range_end);
}

/* Fill the per-vlan options (state + tunnel info) for a vlan entry dump.
 * Returns false if the skb ran out of space.
 */
bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v)
{
	return !nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_STATE,
			   br_vlan_get_state(v)) &&
	       __vlan_tun_put(skb, v);
}

/* Worst-case netlink attribute size of the per-vlan options above, used when
 * sizing notification skbs.
 */
size_t br_vlan_opts_nl_size(void)
{
	return nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_STATE */
	       + nla_total_size(0) /* BRIDGE_VLANDB_ENTRY_TUNNEL_INFO */
	       + nla_total_size(sizeof(u32)); /* BRIDGE_VLANDB_TINFO_ID */
}

/* Set the STP state of a single vlan.  Rejected while the kernel STP
 * implementation is active since it owns the port/vlan states.  Sets
 * *changed only when the state actually transitions.  Returns 0 or a
 * negative errno (with extack populated).
 */
static int br_vlan_modify_state(struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *v,
				u8 state,
				bool *changed,
				struct netlink_ext_ack *extack)
{
	struct net_bridge *br;

	ASSERT_RTNL();

	if (state > BR_STATE_BLOCKING) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid vlan state");
		return -EINVAL;
	}

	/* bridge ("brentry") vlans point at the bridge directly,
	 * port vlans reach it through their port
	 */
	if (br_vlan_is_brentry(v))
		br = v->br;
	else
		br = v->port->br;

	if (br->stp_enabled == BR_KERNEL_STP) {
		NL_SET_ERR_MSG_MOD(extack, "Can't modify vlan state when using kernel STP");
		return -EBUSY;
	}

	if (v->state == state)
		return 0;

	/* keep the cached pvid state in the vlan group in sync */
	if (v->vid == br_get_pvid(vg))
		br_vlan_set_pvid_state(vg, state);

	br_vlan_set_state(v, state);
	*changed = true;

	return 0;
}

static const struct nla_policy br_vlandb_tinfo_pol[BRIDGE_VLANDB_TINFO_MAX + 1] = {
	[BRIDGE_VLANDB_TINFO_ID]	= { .type = NLA_U32 },
	[BRIDGE_VLANDB_TINFO_CMD]	= { .type = NLA_U32 },
};

/* Process a BRIDGE_VLANDB_ENTRY_TUNNEL_INFO attribute for vlan @v: either map
 * a tunnel id to the vlan (RTM_SETLINK) or remove the mapping (RTM_DELLINK).
 * Only valid on ports that have the vlan_tunnel flag enabled.
 */
static int br_vlan_modify_tunnel(const struct net_bridge_port *p,
				 struct net_bridge_vlan *v,
				 struct nlattr **tb,
				 bool *changed,
				 struct netlink_ext_ack *extack)
{
	struct nlattr *tun_tb[BRIDGE_VLANDB_TINFO_MAX + 1], *attr;
	struct bridge_vlan_info *vinfo;
	u32 tun_id = 0;
	int cmd, err;

	if (!p) {
		NL_SET_ERR_MSG_MOD(extack, "Can't modify tunnel mapping of non-port vlans");
		return -EINVAL;
	}
	if (!(p->flags & BR_VLAN_TUNNEL)) {
		NL_SET_ERR_MSG_MOD(extack, "Port doesn't have tunnel flag set");
		return -EINVAL;
	}

	attr = tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO];
	err = nla_parse_nested(tun_tb, BRIDGE_VLANDB_TINFO_MAX, attr,
			       br_vlandb_tinfo_pol, extack);
	if (err)
		return err;

	if (!tun_tb[BRIDGE_VLANDB_TINFO_CMD]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing tunnel command attribute");
		return -ENOENT;
	}
	cmd = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_CMD]);
	switch (cmd) {
	case RTM_SETLINK:
		if (!tun_tb[BRIDGE_VLANDB_TINFO_ID]) {
			NL_SET_ERR_MSG_MOD(extack, "Missing tunnel id attribute");
			return -ENOENT;
		}
		/* when working on vlan ranges this is the starting tunnel id */
		tun_id = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_ID]);
		/* vlan info attr is guaranteed by br_vlan_rtm_process_one */
		vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
		/* tunnel ids are mapped to each vlan in increasing order,
		 * the starting vlan is in BRIDGE_VLANDB_ENTRY_INFO and v is the
		 * current vlan, so we compute: tun_id + v - vinfo->vid
		 */
		tun_id += v->vid - vinfo->vid;
		break;
	case RTM_DELLINK:
		/* tun_id stays 0; br_vlan_tunnel_info() deletes the mapping */
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel command");
		return -EINVAL;
	}

	return br_vlan_tunnel_info(p, cmd, v->vid, tun_id, changed);
}

/* Apply all per-vlan option attributes present in @tb to a single vlan.
 * *changed reports whether anything was actually modified so the caller can
 * decide what to notify.
 */
static int br_vlan_process_one_opts(const struct net_bridge *br,
				    const struct net_bridge_port *p,
				    struct net_bridge_vlan_group *vg,
				    struct net_bridge_vlan *v,
				    struct nlattr **tb,
				    bool *changed,
				    struct netlink_ext_ack *extack)
{
	int err;

	*changed = false;
	if (tb[BRIDGE_VLANDB_ENTRY_STATE]) {
		u8 state = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_STATE]);

		err = br_vlan_modify_state(vg, v, state, changed, extack);
		if (err)
			return err;
	}
	if (tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO]) {
		err = br_vlan_modify_tunnel(p, v, tb, changed, extack);
		if (err)
			return err;
	}

	return 0;
}

/* Apply option attributes to every vlan in [range_start, range_end] and send
 * RTM_NEWVLAN notifications, coalescing consecutive changed vlans whose
 * options are compatible into single range notifications.  On mid-range
 * failure, vlans already processed keep their new options and have been
 * notified; the error is returned for the rest.
 */
int br_vlan_process_options(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct net_bridge_vlan *range_start,
			    struct net_bridge_vlan *range_end,
			    struct nlattr **tb,
			    struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v, *curr_start = NULL, *curr_end = NULL;
	struct net_bridge_vlan_group *vg;
	int vid, err = 0;
	u16 pvid;

	if (p)
		vg = nbp_vlan_group(p);
	else
		vg = br_vlan_group(br);

	if (!range_start || !br_vlan_should_use(range_start)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan range start doesn't exist, can't process options");
		return -ENOENT;
	}
	if (!range_end || !br_vlan_should_use(range_end)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan range end doesn't exist, can't process options");
		return -ENOENT;
	}

	pvid = br_get_pvid(vg);
	for (vid = range_start->vid; vid <= range_end->vid; vid++) {
		bool changed = false;

		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v)) {
			NL_SET_ERR_MSG_MOD(extack, "Vlan in range doesn't exist, can't process options");
			err = -ENOENT;
			break;
		}

		err = br_vlan_process_one_opts(br, p, vg, v, tb, &changed,
					       extack);
		if (err)
			break;

		if (changed) {
			/* vlan options changed, check for range */
			if (!curr_start) {
				curr_start = v;
				curr_end = v;
				continue;
			}

			/* the pvid is always notified on its own, and a vlan
			 * whose options don't match ends the current range
			 */
			if (v->vid == pvid ||
			    !br_vlan_can_enter_range(v, curr_end)) {
				br_vlan_notify(br, p, curr_start->vid,
					       curr_end->vid, RTM_NEWVLAN);
				curr_start = v;
			}
			curr_end = v;
		} else {
			/* nothing changed and nothing to notify yet */
			if (!curr_start)
				continue;

			br_vlan_notify(br, p, curr_start->vid, curr_end->vid,
				       RTM_NEWVLAN);
			curr_start = NULL;
			curr_end = NULL;
		}
	}
	/* flush the last pending range (also covers the error-break paths) */
	if (curr_start)
		br_vlan_notify(br, p, curr_start->vid, curr_end->vid,
			       RTM_NEWVLAN);

	return err;
}

/* Global (bridge-wide, per-vlan) options range check: vids must be
 * consecutive, the global mcast-enabled flag must match, and the per-vlan
 * multicast context options must be equal.
 */
bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr,
					 const struct net_bridge_vlan *r_end)
{
	return v_curr->vid - r_end->vid == 1 &&
	       ((v_curr->priv_flags ^ r_end->priv_flags) &
		BR_VLFLAG_GLOBAL_MCAST_ENABLED) == 0 &&
	       br_multicast_ctx_options_equal(&v_curr->br_mcast_ctx,
					      &r_end->br_mcast_ctx);
}

/* Fill a BRIDGE_VLANDB_GLOBAL_OPTIONS nest for the vlan range
 * [vid, vid_range] using @v_opts as the representative vlan (all vlans in a
 * range have equal global options by construction).  Returns false if the
 * skb ran out of space.
 */
bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range,
			      const struct net_bridge_vlan *v_opts)
{
	u64 clockval __maybe_unused; /* only used under IGMP snooping config */
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_GLOBAL_OPTIONS);
	if (!nest)
		return false;

	if (nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_ID, vid))
		goto out_err;

	/* emit the range end only for real ranges (more than one vlan) */
	if (vid_range && vid < vid_range &&
	    nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_RANGE, vid_range))
		goto out_err;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING,
		       !!(v_opts->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED)) ||
	    nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION,
		       v_opts->br_mcast_ctx.multicast_igmp_version) ||
	    nla_put_u32(skb, BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT,
			v_opts->br_mcast_ctx.multicast_last_member_count) ||
	    nla_put_u32(skb, BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT,
			v_opts->br_mcast_ctx.multicast_startup_query_count))
		goto out_err;

	/* intervals are exposed to userspace in clock_t units, not jiffies */
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_membership_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_querier_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_query_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;

#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION,
		       v_opts->br_mcast_ctx.multicast_mld_version))
		goto out_err;
#endif
#endif

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

/* Worst-case notification message size for one global-options range. */
static size_t rtnl_vlan_global_opts_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
		+ nla_total_size(0) /* BRIDGE_VLANDB_GLOBAL_OPTIONS */
		+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_GOPTS_ID */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING */
		+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION */
		+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION */
		+ nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT */
		+ nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL */
#endif
		+ nla_total_size(sizeof(u16)); /* BRIDGE_VLANDB_GOPTS_RANGE */
}

/* Broadcast an RTM_NEWVLAN global-options notification for the vlan range
 * [vid, vid_range] on RTNLGRP_BRVLAN.  Best-effort: allocation/fill failures
 * are reported via rtnl_set_sk_err; a vanished vlan is silently dropped
 * (out_kfree skips the sk_err report on purpose).
 */
static void br_vlan_global_opts_notify(const struct net_bridge *br,
				       u16 vid, u16 vid_range)
{
	struct net_bridge_vlan *v;
	struct br_vlan_msg *bvm;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	/* right now notifications are done only with rtnl held */
	ASSERT_RTNL();

	skb = nlmsg_new(rtnl_vlan_global_opts_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		goto out_err;

	err = -EMSGSIZE;
	nlh = nlmsg_put(skb, 0, 0, RTM_NEWVLAN, sizeof(*bvm), 0);
	if (!nlh)
		goto out_err;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = AF_BRIDGE;
	bvm->ifindex = br->dev->ifindex;

	/* need to find the vlan due to flags/options */
	v = br_vlan_find(br_vlan_group(br), vid);
	if (!v)
		goto out_kfree;

	if (!br_vlan_global_opts_fill(skb, vid, vid_range, v))
		goto out_err;

	nlmsg_end(skb, nlh);
	rtnl_notify(skb, dev_net(br->dev), 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
	return;

out_err:
	rtnl_set_sk_err(dev_net(br->dev), RTNLGRP_BRVLAN, err);
out_kfree:
	kfree_skb(skb); /* kfree_skb(NULL) is a no-op, safe on the !skb path */
}

/* Apply all global option attributes present in @tb to a single vlan.
 * *changed reports whether any option was modified.  The multicast options
 * are only compiled in with CONFIG_BRIDGE_IGMP_SNOOPING (MLD version
 * additionally requires IPv6).
 */
static int br_vlan_process_global_one_opts(const struct net_bridge *br,
					   struct net_bridge_vlan_group *vg,
					   struct net_bridge_vlan *v,
					   struct nlattr **tb,
					   bool *changed,
					   struct netlink_ext_ack *extack)
{
	int err __maybe_unused; /* only used under IGMP snooping config */

	*changed = false;
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING]) {
		u8 mc_snooping;

		mc_snooping = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING]);
		/* only counts as a change if the toggle actually flipped */
		if (br_multicast_toggle_global_vlan(v, !!mc_snooping))
			*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION]) {
		u8 ver;

		ver = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(&v->br_mcast_ctx, ver);
		if (err)
			return err;
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT]) {
		u32 cnt;

		cnt = nla_get_u32(tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT]);
		v->br_mcast_ctx.multicast_last_member_count = cnt;
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT]) {
		u32 cnt;

		cnt = nla_get_u32(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT]);
		v->br_mcast_ctx.multicast_startup_query_count = cnt;
		*changed = true;
	}
	/* interval attributes arrive in clock_t units and are stored as
	 * jiffies; see br_vlan_global_opts_fill() for the reverse conversion
	 */
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL]);
		v->br_mcast_ctx.multicast_last_member_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL]);
		v->br_mcast_ctx.multicast_membership_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]);
		v->br_mcast_ctx.multicast_querier_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]);
		v->br_mcast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]);
		v->br_mcast_ctx.multicast_query_response_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]);
		v->br_mcast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION]) {
		u8 ver;

		ver = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(&v->br_mcast_ctx, ver);
		if (err)
			return err;
		*changed = true;
	}
#endif
#endif

	return 0;
}

static const struct nla_policy br_vlan_db_gpol[BRIDGE_VLANDB_GOPTS_MAX + 1] = {
	[BRIDGE_VLANDB_GOPTS_ID]	= { .type = NLA_U16 },
	[BRIDGE_VLANDB_GOPTS_RANGE]	= { .type = NLA_U16 },
	[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING]	= { .type = NLA_U8 },
	[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION]	= { .type = NLA_U8 },
	[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION]	= { .type = NLA_U8 },
	[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT]	= { .type = NLA_U32 },
	[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT]	= { .type = NLA_U32 },
	[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]	= { .type = NLA_U64 },
};

/* RTM handler for global vlan options: parse the attribute, validate the
 * vid (range), apply the options to each vlan in the range and send
 * coalesced notifications, mirroring br_vlan_process_options() above.
 * Only RTM_NEWVLAN (set) is supported, and only on the bridge device itself.
 */
int br_vlan_rtm_process_global_options(struct net_device *dev,
				       const struct nlattr *attr,
				       int cmd,
				       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v, *curr_start = NULL, *curr_end = NULL;
	struct nlattr *tb[BRIDGE_VLANDB_GOPTS_MAX + 1];
	struct net_bridge_vlan_group *vg;
	u16 vid, vid_range = 0;
	struct net_bridge *br;
	int err = 0;

	if (cmd != RTM_NEWVLAN) {
		NL_SET_ERR_MSG_MOD(extack, "Global vlan options support only set operation");
		return -EINVAL;
	}
	if (!netif_is_bridge_master(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Global vlan options can only be set on bridge device");
		return -EINVAL;
	}
	br = netdev_priv(dev);
	vg = br_vlan_group(br);
	if (WARN_ON(!vg))
		return -ENODEV;

	err = nla_parse_nested(tb, BRIDGE_VLANDB_GOPTS_MAX, attr,
			       br_vlan_db_gpol, extack);
	if (err)
		return err;

	if (!tb[BRIDGE_VLANDB_GOPTS_ID]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry id");
		return -EINVAL;
	}
	vid = nla_get_u16(tb[BRIDGE_VLANDB_GOPTS_ID]);
	if (!br_vlan_valid_id(vid, extack))
		return -EINVAL;

	if (tb[BRIDGE_VLANDB_GOPTS_RANGE]) {
		vid_range = nla_get_u16(tb[BRIDGE_VLANDB_GOPTS_RANGE]);
		if (!br_vlan_valid_id(vid_range, extack))
			return -EINVAL;
		if (vid >= vid_range) {
			NL_SET_ERR_MSG_MOD(extack, "End vlan id is less than or equal to start vlan id");
			return -EINVAL;
		}
	} else {
		/* no range attribute: operate on the single vlan @vid */
		vid_range = vid;
	}

	for (; vid <= vid_range; vid++) {
		bool changed = false;

		v = br_vlan_find(vg, vid);
		if (!v) {
			NL_SET_ERR_MSG_MOD(extack, "Vlan in range doesn't exist, can't process global options");
			err = -ENOENT;
			break;
		}

		err = br_vlan_process_global_one_opts(br, vg, v, tb, &changed,
						      extack);
		if (err)
			break;

		if (changed) {
			/* vlan options changed, check for range */
			if (!curr_start) {
				curr_start = v;
				curr_end = v;
				continue;
			}

			if (!br_vlan_global_opts_can_enter_range(v, curr_end)) {
				br_vlan_global_opts_notify(br, curr_start->vid,
							   curr_end->vid);
				curr_start = v;
			}
			curr_end = v;
		} else {
			/* nothing changed and nothing to notify yet */
			if (!curr_start)
				continue;

			br_vlan_global_opts_notify(br, curr_start->vid,
						   curr_end->vid);
			curr_start = NULL;
			curr_end = NULL;
		}
	}
	/* flush the last pending range (also covers the error-break paths) */
	if (curr_start)
		br_vlan_global_opts_notify(br, curr_start->vid, curr_end->vid);

	return err;
}