#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"

static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
{
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.locks_mul = 1,
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}

static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid == vid)
		return;

	smp_wmb();
	vg->pvid = vid;
}

static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return;

	smp_wmb();
	vg->pvid = 0;
}

static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan_group *vg;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	if (flags & BRIDGE_VLAN_INFO_PVID)
		__vlan_add_pvid(vg, v->vid);
	else
		__vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
}

static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  u16 vid, u16 flags)
{
	struct switchdev_obj_port_vlan v = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid_begin = vid,
		.vid_end = vid,
	};
	int err;

	/* Try the switchdev op first. In case it is not supported, fall back
	 * to the 8021q add.
	 */
	err = switchdev_port_obj_add(dev, &v.obj);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, vid);
	return err;
}

static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;
	}
	list_add_rcu(&v->vlist, hpos);
}

static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}

static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  u16 vid)
{
	struct switchdev_obj_port_vlan v = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid_begin = vid,
		.vid_end = vid,
	};
	int err;

	/* Try the switchdev op first. In case it is not supported, fall back
	 * to the 8021q del.
	 */
	err = switchdev_port_obj_del(dev, &v.obj);
	if (err == -EOPNOTSUPP) {
		vlan_vid_del(dev, br->vlan_proto, vid);
		return 0;
	}
	return err;
}
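/* Both helpers above follow the same fallback contract: a switchdev driver
 * that cannot offload the VLAN object returns -EOPNOTSUPP, and only then is
 * the software 8021q VID filter used.  A minimal caller sketch (illustrative
 * only, not compiled):
 *
 *	err = __vlan_vid_add(dev, br, vid, flags);
 *	if (err)
 *		return err;	// hard failure from the driver or 8021q
 *	// success: vid is now either offloaded or in the SW VID filter
 */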
/* Returns a master vlan, if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
	}
	atomic_inc(&masterv->refcnt);

	return masterv;
}

static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (atomic_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		kfree_rcu(masterv, rcu);
	}
}
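/* Usage sketch for the pair above (illustrative only, not compiled): a port
 * vlan pins its global (master) context for its whole lifetime, so the
 * expected pairing is
 *
 *	masterv = br_vlan_get_master(br, vid);	// +1 ref, created if absent
 *	if (!masterv)
 *		return -ENOMEM;
 *	...
 *	br_vlan_put_master(masterv);		// -1 ref, freed at zero
 */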
/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		vg = br_vlan_group(br);
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		vg = nbp_vlan_group(p);
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v->vid, flags);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			err = br_vlan_add(br, v->vid, flags |
					  BRIDGE_VLAN_INFO_BRENTRY);
			if (err)
				goto out_filt;
		}

		masterv = br_vlan_get_master(br, v->vid);
		if (!masterv) {
			err = -ENOMEM;
			goto out_filt;
		}
		v->brvlan = masterv;
	}

	/* Add the dev mac and count the vlan only if it's usable */
	if (br_vlan_should_use(v)) {
		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto out_filt;
		}
		vg->num_vlans++;
	}

	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_add_flags(v, flags);
out:
	return err;

out_fdb_insert:
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v->vid);
		if (masterv) {
			br_vlan_put_master(masterv);
			v->brvlan = NULL;
		}
	}

	goto out;
}

static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	if (masterv != v) {
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		kfree_rcu(v, rcu);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}

static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	kfree(vg);
}

static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
		__vlan_del(vlan);
}
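/* Callers of __vlan_flush() (br_vlan_flush() and nbp_vlan_flush() below)
 * follow the usual RCU teardown order so that lockless readers never see
 * freed memory:
 *
 *	__vlan_flush(vg);			// empty the group, under RTNL
 *	RCU_INIT_POINTER(owner->vlgrp, NULL);	// unpublish the group
 *	synchronize_rcu();			// wait out current readers
 *	__vlan_group_free(vg);			// now safe to free
 */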
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has the untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* A vlan entry must be configured at this point.  The only exception
	 * is when the bridge is set in promiscuous mode and the packet is
	 * destined for the bridge device.  In this case pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		skb->vlan_tci = 0;

out:
	return skb;
}

/* Called under RCU */
static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
			      struct sk_buff *skb, u16 *vid)
{
	const struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on the bridge device and the frame
	 * was sent from a vlan device on the bridge device, it does not have
	 * an HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged frame. */
			__vlan_hwaccel_put_tag(skb, proto, pvid);
		else
			/* Priority-tagged frame.
			 * At this point, we know that skb->vlan_tci had the
			 * VLAN_TAG_PRESENT bit set and its VID field was
			 * 0x000.  We update only the VID field and preserve
			 * the PCP field.
			 */
			skb->vlan_tci |= pvid;

		return true;
	}

	/* Frame had a valid vlan tag.  See if the vlan is allowed */
	v = br_vlan_find(vg, *vid);
	if (v && br_vlan_should_use(v))
		return true;
drop:
	kfree_skb(skb);
	return false;
}

bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid)
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	if (!br->vlan_enabled) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(vg, br->vlan_proto, skb, vid);
}

/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
		       const struct sk_buff *skb)
{
	const struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		return true;

	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v))
		return true;

	return false;
}
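/* A worked example of the ingress classification above, assuming a port
 * whose vlan group has pvid 10 and vids 10 and 20 configured:
 *
 *	untagged frame			-> *vid = 10, hwaccel tag 10 added
 *	priority-tagged frame (VID 0)	-> *vid = 10, PCP bits preserved
 *	tagged, VID 20			-> accepted, *vid = 20
 *	tagged, VID 30 (not configured)	-> dropped
 */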
/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;

	/* If filtering was disabled at input, let it pass. */
	if (!br->vlan_enabled)
		return true;

	vg = nbp_vlan_group_rcu(p);
	if (!vg || !vg->num_vlans)
		return false;

	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid)
			return false;

		return true;
	}

	if (br_vlan_find(vg, *vid))
		return true;

	return false;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan) {
		if (!br_vlan_is_brentry(vlan)) {
			/* Trying to change flags of non-existent bridge vlan */
			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
				return -EINVAL;
			/* It was only kept for port vlans, now make it real */
			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
					    vlan->vid);
			if (ret) {
				br_err(br, "failed to insert local address into bridge forwarding table\n");
				return ret;
			}
			atomic_inc(&vlan->refcnt);
			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			vg->num_vlans++;
		}
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		atomic_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	return __vlan_del(v);
}

void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (!vg)
		return NULL;

	return br_vlan_lookup(&vg->vlan_hash, vid);
}

/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br->group_addr_set)
		return;

	spin_lock_bh(&br->lock);
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}
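/* group_addr[5] above selects the last octet of the link-local address the
 * bridge terminates: 01:80:C2:00:00:00 is the Bridge Group Address used for
 * 802.1Q, and 01:80:C2:00:00:08 is the Provider Bridge Group Address used
 * for 802.1ad provider bridges.
 */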
/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}

int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	if (br->vlan_enabled == val)
		return 0;

	br->vlan_enabled = val;
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}

int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	if (!rtnl_trylock())
		return restart_syscall();

	__br_vlan_filter_toggle(br, val);
	rtnl_unlock();

	return 0;
}

int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}

int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
{
	int err;

	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
		return -EPROTONOSUPPORT;

	if (!rtnl_trylock())
		return restart_syscall();

	err = __br_vlan_set_proto(br, htons(val));
	rtnl_unlock();

	return err;
}

static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	struct net_bridge_vlan *v;

	if (vid != vg->pvid)
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
}
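/* vlan_default_pvid() answers "does this group still carry the untouched
 * default_pvid configuration?": the vid is the group's pvid and the entry
 * is usable and egresses untagged.  Only entries of that exact shape are
 * auto-removed or migrated by the default_pvid code below; anything the
 * user changed is left alone.
 */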
static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}

int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	u16 old_pvid;
	int err = 0;
	unsigned long *changed;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	kfree(changed);
	return err;

err_port:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED);
		nbp_vlan_delete(p, pvid);
	}

	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY);
		br_vlan_delete(br, pvid);
	}
	goto out;
}

int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (pvid == br->default_pvid)
		goto unlock;

	/* Only allow default pvid change when filtering is disabled */
	if (br->vlan_enabled) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto unlock;
	}
	err = __br_vlan_set_default_pvid(br, pvid);
unlock:
	rtnl_unlock();
	return err;
}

int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	rcu_assign_pointer(br->vlgrp, vg);
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

err_vlan_add:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}
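/* br_vlan_init() above pairs with br_vlan_flush(), and nbp_vlan_init()
 * below pairs with nbp_vlan_flush().  A port setup sketch (illustrative
 * only, not compiled; the unwind label is hypothetical):
 *
 *	err = nbp_vlan_init(p);		// adds the default_pvid vlan
 *	if (err)
 *		goto cleanup;
 *	...
 *	nbp_vlan_flush(p);		// on port removal, frees the group
 */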
int nbp_vlan_init(struct net_bridge_port *p)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(nbp_vlan_group(port), vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}

void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
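/* Locking summary for this file: the add/delete/flush entry points run
 * under RTNL (see the ASSERT_RTNL() calls), while the fast-path helpers
 * (br_allowed_ingress, br_allowed_egress, br_should_learn, br_handle_vlan)
 * run under RCU and must reach the vlan group only via the RCU accessors.
 */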