// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020, Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/ip_tunnels.h>

#include "br_private.h"
#include "br_private_tunnel.h"

static bool __vlan_tun_put(struct sk_buff *skb, const struct net_bridge_vlan *v)
{
	__be32 tid = tunnel_id_to_key32(v->tinfo.tunnel_id);
	struct nlattr *nest;

	if (!v->tinfo.tunnel_dst)
		return true;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_TUNNEL_INFO);
	if (!nest)
		return false;
	if (nla_put_u32(skb, BRIDGE_VLANDB_TINFO_ID, be32_to_cpu(tid))) {
		nla_nest_cancel(skb, nest);
		return false;
	}
	nla_nest_end(skb, nest);

	return true;
}

static bool __vlan_tun_can_enter_range(const struct net_bridge_vlan *v_curr,
				       const struct net_bridge_vlan *range_end)
{
	return (!v_curr->tinfo.tunnel_dst && !range_end->tinfo.tunnel_dst) ||
	       vlan_tunid_inrange(v_curr, range_end);
}

/* check if the options' state of v_curr allows it to enter the range */
bool br_vlan_opts_eq_range(const struct net_bridge_vlan *v_curr,
			   const struct net_bridge_vlan *range_end)
{
	u8 range_mc_rtr = br_vlan_multicast_router(range_end);
	u8 curr_mc_rtr = br_vlan_multicast_router(v_curr);

	return v_curr->state == range_end->state &&
	       __vlan_tun_can_enter_range(v_curr, range_end) &&
	       curr_mc_rtr == range_mc_rtr;
}

bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v)
{
	if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_STATE, br_vlan_get_state(v)) ||
	    !__vlan_tun_put(skb, v))
		return false;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_MCAST_ROUTER,
		       br_vlan_multicast_router(v)))
		return false;
#endif

	return true;
}

size_t br_vlan_opts_nl_size(void)
{
	return nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_STATE */
	       + nla_total_size(0) /* BRIDGE_VLANDB_ENTRY_TUNNEL_INFO */
	       + nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_TINFO_ID */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       + nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_MCAST_ROUTER */
#endif
	       + 0;
}

static int br_vlan_modify_state(struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *v,
				u8 state,
				bool *changed,
				struct netlink_ext_ack *extack)
{
	struct net_bridge *br;

	ASSERT_RTNL();

	if (state > BR_STATE_BLOCKING) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid vlan state");
		return -EINVAL;
	}

	if (br_vlan_is_brentry(v))
		br = v->br;
	else
		br = v->port->br;

	if (br->stp_enabled == BR_KERNEL_STP) {
		NL_SET_ERR_MSG_MOD(extack, "Can't modify vlan state when using kernel STP");
		return -EBUSY;
	}

	if (v->state == state)
		return 0;

	if (v->vid == br_get_pvid(vg))
		br_vlan_set_pvid_state(vg, state);

	br_vlan_set_state(v, state);
	*changed = true;

	return 0;
}

static const struct nla_policy br_vlandb_tinfo_pol[BRIDGE_VLANDB_TINFO_MAX + 1] = {
	[BRIDGE_VLANDB_TINFO_ID]	= { .type = NLA_U32 },
	[BRIDGE_VLANDB_TINFO_CMD]	= { .type = NLA_U32 },
};

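/* Illustrative example of the per-vlan tunnel id mapping done below
 * (values are hypothetical): an RTM_SETLINK request for vlan range 10-20
 * carrying BRIDGE_VLANDB_TINFO_ID 1000 maps vlan 10 -> tunnel 1000,
 * vlan 11 -> tunnel 1001, ..., vlan 20 -> tunnel 1010, because the starting
 * tunnel id is advanced by (v->vid - vinfo->vid) for each vlan in the range.
 */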
static int br_vlan_modify_tunnel(const struct net_bridge_port *p,
				 struct net_bridge_vlan *v,
				 struct nlattr **tb,
				 bool *changed,
				 struct netlink_ext_ack *extack)
{
	struct nlattr *tun_tb[BRIDGE_VLANDB_TINFO_MAX + 1], *attr;
	struct bridge_vlan_info *vinfo;
	u32 tun_id = 0;
	int cmd, err;

	if (!p) {
		NL_SET_ERR_MSG_MOD(extack, "Can't modify tunnel mapping of non-port vlans");
		return -EINVAL;
	}
	if (!(p->flags & BR_VLAN_TUNNEL)) {
		NL_SET_ERR_MSG_MOD(extack, "Port doesn't have tunnel flag set");
		return -EINVAL;
	}

	attr = tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO];
	err = nla_parse_nested(tun_tb, BRIDGE_VLANDB_TINFO_MAX, attr,
			       br_vlandb_tinfo_pol, extack);
	if (err)
		return err;

	if (!tun_tb[BRIDGE_VLANDB_TINFO_CMD]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing tunnel command attribute");
		return -ENOENT;
	}
	cmd = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_CMD]);
	switch (cmd) {
	case RTM_SETLINK:
		if (!tun_tb[BRIDGE_VLANDB_TINFO_ID]) {
			NL_SET_ERR_MSG_MOD(extack, "Missing tunnel id attribute");
			return -ENOENT;
		}
		/* when working on vlan ranges this is the starting tunnel id */
		tun_id = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_ID]);
		/* vlan info attr is guaranteed by br_vlan_rtm_process_one */
		vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
		/* tunnel ids are mapped to each vlan in increasing order,
		 * the starting vlan is in BRIDGE_VLANDB_ENTRY_INFO and v is the
		 * current vlan, so we compute: tun_id + v - vinfo->vid
		 */
		tun_id += v->vid - vinfo->vid;
		break;
	case RTM_DELLINK:
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel command");
		return -EINVAL;
	}

	return br_vlan_tunnel_info(p, cmd, v->vid, tun_id, changed);
}

static int br_vlan_process_one_opts(const struct net_bridge *br,
				    const struct net_bridge_port *p,
				    struct net_bridge_vlan_group *vg,
				    struct net_bridge_vlan *v,
				    struct nlattr **tb,
				    bool *changed,
				    struct netlink_ext_ack *extack)
{
	int err;

	*changed = false;
	if (tb[BRIDGE_VLANDB_ENTRY_STATE]) {
		u8 state = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_STATE]);

		err = br_vlan_modify_state(vg, v, state, changed, extack);
		if (err)
			return err;
	}
	if (tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO]) {
		err = br_vlan_modify_tunnel(p, v, tb, changed, extack);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER]) {
		u8 val;

		val = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER]);
		err = br_multicast_set_vlan_router(v, val);
		if (err)
			return err;
		*changed = true;
	}
#endif

	return 0;
}

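/* Apply the options in tb to every vlan in the [range_start, range_end]
 * range and send RTM_NEWVLAN notifications for the vlans that actually
 * changed.  Consecutive changed vlans whose resulting options are still
 * range-compatible (br_vlan_can_enter_range) are coalesced into a single
 * notification; the pvid always starts a new notification range.
 */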
int br_vlan_process_options(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct net_bridge_vlan *range_start,
			    struct net_bridge_vlan *range_end,
			    struct nlattr **tb,
			    struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v, *curr_start = NULL, *curr_end = NULL;
	struct net_bridge_vlan_group *vg;
	int vid, err = 0;
	u16 pvid;

	if (p)
		vg = nbp_vlan_group(p);
	else
		vg = br_vlan_group(br);

	if (!range_start || !br_vlan_should_use(range_start)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan range start doesn't exist, can't process options");
		return -ENOENT;
	}
	if (!range_end || !br_vlan_should_use(range_end)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan range end doesn't exist, can't process options");
		return -ENOENT;
	}

	pvid = br_get_pvid(vg);
	for (vid = range_start->vid; vid <= range_end->vid; vid++) {
		bool changed = false;

		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v)) {
			NL_SET_ERR_MSG_MOD(extack, "Vlan in range doesn't exist, can't process options");
			err = -ENOENT;
			break;
		}

		err = br_vlan_process_one_opts(br, p, vg, v, tb, &changed,
					       extack);
		if (err)
			break;

		if (changed) {
			/* vlan options changed, check for range */
			if (!curr_start) {
				curr_start = v;
				curr_end = v;
				continue;
			}

			if (v->vid == pvid ||
			    !br_vlan_can_enter_range(v, curr_end)) {
				br_vlan_notify(br, p, curr_start->vid,
					       curr_end->vid, RTM_NEWVLAN);
				curr_start = v;
			}
			curr_end = v;
		} else {
			/* nothing changed and nothing to notify yet */
			if (!curr_start)
				continue;

			br_vlan_notify(br, p, curr_start->vid, curr_end->vid,
				       RTM_NEWVLAN);
			curr_start = NULL;
			curr_end = NULL;
		}
	}
	if (curr_start)
		br_vlan_notify(br, p, curr_start->vid, curr_end->vid,
			       RTM_NEWVLAN);

	return err;
}

bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr,
					 const struct net_bridge_vlan *r_end)
{
	return v_curr->vid - r_end->vid == 1 &&
	       ((v_curr->priv_flags ^ r_end->priv_flags) &
		BR_VLFLAG_GLOBAL_MCAST_ENABLED) == 0 &&
	       br_multicast_ctx_options_equal(&v_curr->br_mcast_ctx,
					      &r_end->br_mcast_ctx);
}

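/* Dump the global options of vlan vid (optionally representing the
 * compressed range vid..vid_range) into a BRIDGE_VLANDB_GLOBAL_OPTIONS
 * nest.  An illustrative layout for a range 10-20 with IGMP snooping
 * compiled in:
 *
 *   BRIDGE_VLANDB_GLOBAL_OPTIONS
 *     BRIDGE_VLANDB_GOPTS_ID (10)
 *     BRIDGE_VLANDB_GOPTS_RANGE (20, only present when dumping a range)
 *     BRIDGE_VLANDB_GOPTS_MCAST_* (snooping, versions, counts and
 *       intervals, the latter exported in clock_t units)
 *     BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS (nest, only when a multicast
 *       router port is present)
 */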
bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range,
			      const struct net_bridge_vlan *v_opts)
{
	struct nlattr *nest2 __maybe_unused;
	u64 clockval __maybe_unused;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_GLOBAL_OPTIONS);
	if (!nest)
		return false;

	if (nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_ID, vid))
		goto out_err;

	if (vid_range && vid < vid_range &&
	    nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_RANGE, vid_range))
		goto out_err;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING,
		       !!(v_opts->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED)) ||
	    nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION,
		       v_opts->br_mcast_ctx.multicast_igmp_version) ||
	    nla_put_u32(skb, BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT,
			v_opts->br_mcast_ctx.multicast_last_member_count) ||
	    nla_put_u32(skb, BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT,
			v_opts->br_mcast_ctx.multicast_startup_query_count) ||
	    nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERIER,
		       v_opts->br_mcast_ctx.multicast_querier) ||
	    br_multicast_dump_querier_state(skb, &v_opts->br_mcast_ctx,
					    BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_STATE))
		goto out_err;

	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_membership_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_querier_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_query_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;

	if (br_rports_have_mc_router(&v_opts->br_mcast_ctx)) {
		nest2 = nla_nest_start(skb,
				       BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS);
		if (!nest2)
			goto out_err;

		rcu_read_lock();
		if (br_rports_fill_info(skb, &v_opts->br_mcast_ctx)) {
			rcu_read_unlock();
			nla_nest_cancel(skb, nest2);
			goto out_err;
		}
		rcu_read_unlock();

		nla_nest_end(skb, nest2);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION,
		       v_opts->br_mcast_ctx.multicast_mld_version))
		goto out_err;
#endif
#endif

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

static size_t rtnl_vlan_global_opts_nlmsg_size(const struct net_bridge_vlan *v)
{
	return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
		+ nla_total_size(0) /* BRIDGE_VLANDB_GLOBAL_OPTIONS */
		+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_GOPTS_ID */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING */
		+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION */
		+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION */
		+ nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT */
		+ nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL */
		+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERIER */
		+ br_multicast_querier_state_size() /* BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_STATE */
		+ nla_total_size(0) /* BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS */
		+ br_rports_size(&v->br_mcast_ctx) /* BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS */
#endif
		+ nla_total_size(sizeof(u16)); /* BRIDGE_VLANDB_GOPTS_RANGE */
}

static void br_vlan_global_opts_notify(const struct net_bridge *br,
				       u16 vid, u16 vid_range)
{
	struct net_bridge_vlan *v;
	struct br_vlan_msg *bvm;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	/* right now notifications are done only with rtnl held */
	ASSERT_RTNL();

	/* need to find the vlan due to flags/options */
	v = br_vlan_find(br_vlan_group(br), vid);
	if (!v)
		return;

	skb = nlmsg_new(rtnl_vlan_global_opts_nlmsg_size(v), GFP_KERNEL);
	if (!skb)
		goto out_err;

	err = -EMSGSIZE;
	nlh = nlmsg_put(skb, 0, 0, RTM_NEWVLAN, sizeof(*bvm), 0);
	if (!nlh)
		goto out_err;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = AF_BRIDGE;
	bvm->ifindex = br->dev->ifindex;

	if (!br_vlan_global_opts_fill(skb, vid, vid_range, v))
		goto out_err;

	nlmsg_end(skb, nlh);
	rtnl_notify(skb, dev_net(br->dev), 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
	return;

out_err:
	rtnl_set_sk_err(dev_net(br->dev), RTNLGRP_BRVLAN, err);
	kfree_skb(skb);
}

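/* Apply the global options in tb to a single vlan's multicast context.
 * Interval attributes arrive in clock_t units and are converted to
 * jiffies before being stored; igmp/mld version and querier changes go
 * through their respective br_multicast_set_* helpers, which may fail.
 */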
static int br_vlan_process_global_one_opts(const struct net_bridge *br,
					   struct net_bridge_vlan_group *vg,
					   struct net_bridge_vlan *v,
					   struct nlattr **tb,
					   bool *changed,
					   struct netlink_ext_ack *extack)
{
	int err __maybe_unused;

	*changed = false;
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING]) {
		u8 mc_snooping;

		mc_snooping = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING]);
		if (br_multicast_toggle_global_vlan(v, !!mc_snooping))
			*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION]) {
		u8 ver;

		ver = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(&v->br_mcast_ctx, ver);
		if (err)
			return err;
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT]) {
		u32 cnt;

		cnt = nla_get_u32(tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT]);
		v->br_mcast_ctx.multicast_last_member_count = cnt;
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT]) {
		u32 cnt;

		cnt = nla_get_u32(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT]);
		v->br_mcast_ctx.multicast_startup_query_count = cnt;
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL]);
		v->br_mcast_ctx.multicast_last_member_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL]);
		v->br_mcast_ctx.multicast_membership_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]);
		v->br_mcast_ctx.multicast_querier_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]);
		v->br_mcast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]);
		v->br_mcast_ctx.multicast_query_response_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]);
		v->br_mcast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]) {
		u8 val;

		val = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]);
		err = br_multicast_set_querier(&v->br_mcast_ctx, val);
		if (err)
			return err;
		*changed = true;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION]) {
		u8 ver;

		ver = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(&v->br_mcast_ctx, ver);
		if (err)
			return err;
		*changed = true;
	}
#endif
#endif

	return 0;
}

static const struct nla_policy br_vlan_db_gpol[BRIDGE_VLANDB_GOPTS_MAX + 1] = {
	[BRIDGE_VLANDB_GOPTS_ID]			= { .type = NLA_U16 },
	[BRIDGE_VLANDB_GOPTS_RANGE]			= { .type = NLA_U16 },
	[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING]		= { .type = NLA_U8 },
	[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION]		= { .type = NLA_U8 },
	[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]		= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]		= { .type = NLA_U8 },
	[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION]	= { .type = NLA_U8 },
	[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT]	= { .type = NLA_U32 },
	[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT]	= { .type = NLA_U32 },
	[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
};

int br_vlan_rtm_process_global_options(struct net_device *dev,
				       const struct nlattr *attr,
				       int cmd,
				       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v, *curr_start = NULL, *curr_end = NULL;
	struct nlattr *tb[BRIDGE_VLANDB_GOPTS_MAX + 1];
	struct net_bridge_vlan_group *vg;
	u16 vid, vid_range = 0;
	struct net_bridge *br;
	int err = 0;

	if (cmd != RTM_NEWVLAN) {
		NL_SET_ERR_MSG_MOD(extack, "Global vlan options support only set operation");
		return -EINVAL;
	}
	if (!netif_is_bridge_master(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Global vlan options can only be set on bridge device");
		return -EINVAL;
	}
	br = netdev_priv(dev);
	vg = br_vlan_group(br);
	if (WARN_ON(!vg))
		return -ENODEV;

	err = nla_parse_nested(tb, BRIDGE_VLANDB_GOPTS_MAX, attr,
			       br_vlan_db_gpol, extack);
	if (err)
		return err;

	if (!tb[BRIDGE_VLANDB_GOPTS_ID]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry id");
		return -EINVAL;
	}
	vid = nla_get_u16(tb[BRIDGE_VLANDB_GOPTS_ID]);
	if (!br_vlan_valid_id(vid, extack))
		return -EINVAL;

	if (tb[BRIDGE_VLANDB_GOPTS_RANGE]) {
		vid_range = nla_get_u16(tb[BRIDGE_VLANDB_GOPTS_RANGE]);
		if (!br_vlan_valid_id(vid_range, extack))
			return -EINVAL;
		if (vid >= vid_range) {
			NL_SET_ERR_MSG_MOD(extack, "End vlan id is less than or equal to start vlan id");
			return -EINVAL;
		}
	} else {
		vid_range = vid;
	}

	for (; vid <= vid_range; vid++) {
		bool changed = false;

		v = br_vlan_find(vg, vid);
		if (!v) {
			NL_SET_ERR_MSG_MOD(extack, "Vlan in range doesn't exist, can't process global options");
			err = -ENOENT;
			break;
		}

		err = br_vlan_process_global_one_opts(br, vg, v, tb, &changed,
						      extack);
		if (err)
			break;

		if (changed) {
			/* vlan options changed, check for range */
			if (!curr_start) {
				curr_start = v;
				curr_end = v;
				continue;
			}

			if (!br_vlan_global_opts_can_enter_range(v, curr_end)) {
				br_vlan_global_opts_notify(br, curr_start->vid,
							   curr_end->vid);
				curr_start = v;
			}
			curr_end = v;
		} else {
			/* nothing changed and nothing to notify yet */
			if (!curr_start)
				continue;

			br_vlan_global_opts_notify(br, curr_start->vid,
						   curr_end->vid);
			curr_start = NULL;
			curr_end = NULL;
		}
	}
	if (curr_start)
		br_vlan_global_opts_notify(br, curr_start->vid, curr_end->vid);

	return err;
}