/* net/bridge/br_vlan.c (OpenBMC Linux tree, revision 2594e906) */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"

static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
{
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}

static void __vlan_add_pvid(u16 *pvid, u16 vid)
{
	if (*pvid == vid)
		return;

	smp_wmb();
	*pvid = vid;
}

static void __vlan_delete_pvid(u16 *pvid, u16 vid)
{
	if (*pvid != vid)
		return;

	smp_wmb();
	*pvid = 0;
}
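
/* Illustrative sketch only (not part of the original file): the smp_wmb()
 * calls above publish the new pvid value and are meant to pair with a
 * read-side barrier in the pvid readers (the real helpers live in
 * br_private.h). A matching reader would look roughly like this; the name
 * here is hypothetical.
 */
#if 0
static u16 example_get_pvid(const u16 *pvid)
{
	smp_rmb();
	return *pvid;
}
#endif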

static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
	if (flags & BRIDGE_VLAN_INFO_PVID) {
		if (br_vlan_is_master(v))
			__vlan_add_pvid(&v->br->pvid, v->vid);
		else
			__vlan_add_pvid(&v->port->pvid, v->vid);
	} else {
		if (br_vlan_is_master(v))
			__vlan_delete_pvid(&v->br->pvid, v->vid);
		else
			__vlan_delete_pvid(&v->port->pvid, v->vid);
	}

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
}

static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  u16 vid, u16 flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	/* If driver uses VLAN ndo ops, use 8021q to install vid
	 * on device, otherwise try switchdev ops to install vid.
	 */

	if (ops->ndo_vlan_rx_add_vid) {
		err = vlan_vid_add(dev, br->vlan_proto, vid);
	} else {
		struct switchdev_obj vlan_obj = {
			.id = SWITCHDEV_OBJ_PORT_VLAN,
			.u.vlan = {
				.flags = flags,
				.vid_begin = vid,
				.vid_end = vid,
			},
		};

		err = switchdev_port_obj_add(dev, &vlan_obj);
		if (err == -EOPNOTSUPP)
			err = 0;
	}

	return err;
}

static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	headp = br_vlan_is_master(v) ? &v->br->vlgrp->vlan_list :
				       &v->port->vlgrp->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;
	}
	list_add(&v->vlist, hpos);
}

static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del(&v->vlist);
}

static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  u16 vid)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err = 0;

	/* If driver uses VLAN ndo ops, use 8021q to delete vid
	 * on device, otherwise try switchdev ops to delete vid.
	 */

	if (ops->ndo_vlan_rx_kill_vid) {
		vlan_vid_del(dev, br->vlan_proto, vid);
	} else {
		struct switchdev_obj vlan_obj = {
			.id = SWITCHDEV_OBJ_PORT_VLAN,
			.u.vlan = {
				.vid_begin = vid,
				.vid_end = vid,
			},
		};

		err = switchdev_port_obj_del(dev, &vlan_obj);
		if (err == -EOPNOTSUPP)
			err = 0;
	}

	return err;
}
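
/* Behavioural sketch (hypothetical helper, not part of the original file):
 * for a device with neither the VLAN ndo ops nor switchdev support, the two
 * __vlan_vid_*() helpers above end up returning 0 because -EOPNOTSUPP is
 * swallowed; the bridge then has to rely on promiscuous mode (see
 * br_manage_promisc()) rather than a HW VLAN filter on the port device.
 */
#if 0
static bool example_dev_has_vlan_filter(const struct net_device *dev)
{
	return dev->netdev_ops->ndo_vlan_rx_add_vid != NULL;
}
#endif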

/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brvlan flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brvlan flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brvlan flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct rhashtable *tbl;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		tbl = &br->vlgrp->vlan_hash;
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		tbl = &p->vlgrp->vlan_hash;
	}

	if (p) {
		u16 master_flags = flags;

		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v->vid, flags);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			master_flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, v->vid, master_flags);
			if (err)
				goto out_filt;
		}

		masterv = br_vlan_find(br->vlgrp, v->vid);
		if (!masterv) {
			/* missing global ctx, create it now */
			err = br_vlan_add(br, v->vid, master_flags);
			if (err)
				goto out_filt;
			masterv = br_vlan_find(br->vlgrp, v->vid);
			WARN_ON(!masterv);
		}
		atomic_inc(&masterv->refcnt);
		v->brvlan = masterv;
	}

	/* Add the dev mac only if it's a usable vlan */
	if (br_vlan_should_use(v)) {
		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed insert local address into bridge forwarding table\n");
			goto out_filt;
		}
	}

	err = rhashtable_lookup_insert_fast(tbl, &v->vnode, br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_add_flags(v, flags);
	if (br_vlan_is_master(v)) {
		if (br_vlan_is_brentry(v))
			br->vlgrp->num_vlans++;
	} else {
		p->vlgrp->num_vlans++;
	}
out:
	return err;

out_fdb_insert:
	br_fdb_find_delete_local(br, p, br->dev->dev_addr, v->vid);

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v->vid);
		if (masterv) {
			atomic_dec(&masterv->refcnt);
			v->brvlan = NULL;
		}
	}

	goto out;
}
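
/* Usage sketch only (hypothetical caller, not part of the original file):
 * how some of the cases in the comment above map onto the public entry
 * points.
 */
#if 0
static int example_vlan_add(struct net_bridge *br, struct net_bridge_port *p)
{
	int err;

	/* Case 2: vlan created directly on the bridge device itself
	 * (master + brvlan, i.e. BRIDGE_VLAN_INFO_BRENTRY).
	 */
	err = br_vlan_add(br, 100, BRIDGE_VLAN_INFO_BRENTRY);
	if (err)
		return err;

	/* Cases 1 and 3: vlan added on a port; the global (master) entry is
	 * reused if it already exists, otherwise it is created on demand
	 * without the brentry flag and only carries per-vlan context.
	 */
	return nbp_vlan_add(p, 100, BRIDGE_VLAN_INFO_UNTAGGED);
}
#endif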

static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br;
	int err = 0;
	struct rhashtable *tbl;
	u16 *pvid;

	if (br_vlan_is_master(v)) {
		br = v->br;
		tbl = &v->br->vlgrp->vlan_hash;
		pvid = &v->br->pvid;
	} else {
		p = v->port;
		br = p->br;
		tbl = &p->vlgrp->vlan_hash;
		masterv = v->brvlan;
		pvid = &p->pvid;
	}

	__vlan_delete_pvid(pvid, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	}

	if (br_vlan_is_master(v)) {
		if (br_vlan_is_brentry(v)) {
			v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
			br->vlgrp->num_vlans--;
		}
	} else {
		p->vlgrp->num_vlans--;
	}

	if (masterv != v) {
		rhashtable_remove_fast(tbl, &v->vnode, br_vlan_rht_params);
		__vlan_del_list(v);
		kfree_rcu(v, rcu);
	}

	if (atomic_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&masterv->br->vlgrp->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		kfree_rcu(masterv, rcu);
	}
out:
	return err;
}

static void __vlan_flush(struct net_bridge_vlan_group *vlgrp, u16 *pvid)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(pvid, *pvid);
	list_for_each_entry_safe(vlan, tmp, &vlgrp->vlan_list, vlist)
		__vlan_del(vlan);
	rhashtable_destroy(&vlgrp->vlan_hash);
	kfree(vlgrp);
}

struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has the untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point.  The
	 * only exception is when the bridge is set in promisc mode and the
	 * packet is destined for the bridge device.  In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		skb->vlan_tci = 0;

out:
	return skb;
}

/* Called under RCU */
static bool __allowed_ingress(struct rhashtable *tbl, u16 pvid, __be16 proto,
			      struct sk_buff *skb, u16 *vid)
{
	const struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on the bridge device and the frame
	 * was sent from a vlan device on the bridge device, it does not have
	 * a HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, we know that skb->vlan_tci had the
			 * VLAN_TAG_PRESENT bit set and its VID field was 0x000.
			 * We update only the VID field and preserve the PCP
			 * field.
			 */
			skb->vlan_tci |= pvid;

		return true;
	}

	/* Frame had a valid vlan tag.  See if the vlan is allowed */
	v = br_vlan_lookup(tbl, *vid);
	if (v && br_vlan_should_use(v))
		return true;
drop:
	kfree_skb(skb);
	return false;
}
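
/* Worked example of the priority-tagged case above (hypothetical values,
 * not part of the original file): a TCI of 0xe000 (PCP 7, VID 0) arriving
 * on a port whose pvid is 5 becomes 0xe005 -- only the VID field is filled
 * in and the PCP bits are preserved, which is exactly what the OR above
 * relies on.
 */
#if 0
static u16 example_fill_pvid(u16 vlan_tci, u16 pvid)
{
	/* Caller guarantees the VID bits of vlan_tci are zero. */
	return vlan_tci | pvid;
}
#endif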

bool br_allowed_ingress(struct net_bridge *br, struct sk_buff *skb, u16 *vid)
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	if (!br->vlan_enabled) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(&br->vlgrp->vlan_hash, br->pvid,
				 br->vlan_proto, skb, vid);
}

bool nbp_allowed_ingress(struct net_bridge_port *p, struct sk_buff *skb,
			 u16 *vid)
{
	struct net_bridge *br = p->br;

	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	if (!br->vlan_enabled) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(&p->vlgrp->vlan_hash, p->pvid, br->vlan_proto,
				 skb, vid);
}

/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
		       const struct sk_buff *skb)
{
	const struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		return true;

	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v))
		return true;

	return false;
}

/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge *br = p->br;

	/* If filtering was disabled at input, let it pass. */
	if (!br->vlan_enabled)
		return true;

	if (!p->vlgrp->num_vlans)
		return false;

	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = nbp_get_pvid(p);
		if (!*vid)
			return false;

		return true;
	}

	if (br_vlan_find(p->vlgrp, *vid))
		return true;

	return false;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vlan = br_vlan_find(br->vlgrp, vid);
	if (vlan) {
		if (!br_vlan_is_brentry(vlan)) {
			/* Trying to change flags of non-existent bridge vlan */
			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
				return -EINVAL;
			/* It was only kept for port vlans, now make it real */
			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
					    vlan->vid);
			if (ret) {
				br_err(br, "failed insert local address into bridge forwarding table\n");
				return ret;
			}
			atomic_inc(&vlan->refcnt);
			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			br->vlgrp->num_vlans++;
		}
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		atomic_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}
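
/* Usage sketch (hypothetical caller, not part of the original file): the
 * contract documented above -- RTNL held and vid in the 1..4094 range -- is
 * entirely the caller's responsibility, e.g. along these lines.
 */
#if 0
static int example_add_bridge_vlan(struct net_bridge *br, u16 vid)
{
	int err;

	if (!vid || vid >= VLAN_VID_MASK)
		return -EINVAL;

	rtnl_lock();
	err = br_vlan_add(br, vid, BRIDGE_VLAN_INFO_BRENTRY);
	rtnl_unlock();

	return err;
}
#endif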

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(br->vlgrp, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);

	return __vlan_del(v);
}

void br_vlan_flush(struct net_bridge *br)
{
	ASSERT_RTNL();

	__vlan_flush(br_vlan_group(br), &br->pvid);
}

struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (!vg)
		return NULL;

	return br_vlan_lookup(&vg->vlan_hash, vid);
}

/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br->group_addr_set)
		return;

	spin_lock_bh(&br->lock);
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}

/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}
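
/* Illustration only (not part of the original file): the two well-known
 * group addresses selected by recalculate_group_addr() differ only in the
 * last octet, which is why just group_addr[5] is rewritten; that same octet
 * doubles as the bit number cleared from the forward mask above, so frames
 * to the provider group address are never forwarded.
 *   802.1Q  Bridge Group Address:          01:80:C2:00:00:00
 *   802.1ad Provider Bridge Group Address: 01:80:C2:00:00:08
 */
#if 0
static const u8 example_dot1q_group_addr[ETH_ALEN] = {
	0x01, 0x80, 0xc2, 0x00, 0x00, 0x00
};
static const u8 example_dot1ad_group_addr[ETH_ALEN] = {
	0x01, 0x80, 0xc2, 0x00, 0x00, 0x08
};
#endif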

int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	if (br->vlan_enabled == val)
		return 0;

	br->vlan_enabled = val;
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}

int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	if (!rtnl_trylock())
		return restart_syscall();

	__br_vlan_filter_toggle(br, val);
	rtnl_unlock();

	return 0;
}

int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list)
		list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);

	return 0;

err_filt:
	list_for_each_entry_continue_reverse(vlan, &p->vlgrp->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list)
		list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);

	return err;
}

int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
{
	int err;

	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
		return -EPROTONOSUPPORT;

	if (!rtnl_trylock())
		return restart_syscall();

	err = __br_vlan_set_proto(br, htons(val));
	rtnl_unlock();

	return err;
}
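
/* Usage sketch (hypothetical caller, not part of the original file): only
 * the 802.1Q and 802.1ad ethertypes are accepted, and the value is passed
 * in host byte order because br_vlan_set_proto() performs the htons()
 * conversion itself before handing it to __br_vlan_set_proto().
 */
#if 0
static int example_make_provider_bridge(struct net_bridge *br)
{
	return br_vlan_set_proto(br, ETH_P_8021AD);
}
#endif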

static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 pvid,
			      u16 vid)
{
	struct net_bridge_vlan *v;

	if (vid != pvid)
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
}

static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br->vlgrp, br->pvid, pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(p->vlgrp, p->pvid, pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}

static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_port *p;
	u16 old_pvid;
	int err = 0;
	unsigned long *changed;

	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	pvent = br_vlan_find(br->vlgrp, pvid);
	if ((!old_pvid || vlan_default_pvid(br->vlgrp, br->pvid, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		if ((old_pvid &&
		     !vlan_default_pvid(p->vlgrp, p->pvid, old_pvid)) ||
		    br_vlan_find(p->vlgrp, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	kfree(changed);
	return err;

err_port:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED);
		nbp_vlan_delete(p, pvid);
	}

	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY);
		br_vlan_delete(br, pvid);
	}
	goto out;
}

int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (pvid == br->default_pvid)
		goto unlock;

	/* Only allow default pvid change when filtering is disabled */
	if (br->vlan_enabled) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto unlock;
	}

	if (!pvid)
		br_vlan_disable_default_pvid(br);
	else
		err = __br_vlan_set_default_pvid(br, pvid);

unlock:
	rtnl_unlock();
	return err;
}

int br_vlan_init(struct net_bridge *br)
{
	int ret = -ENOMEM;

	br->vlgrp = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!br->vlgrp)
		goto out;
	ret = rhashtable_init(&br->vlgrp->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&br->vlgrp->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

err_vlan_add:
	rhashtable_destroy(&br->vlgrp->vlan_hash);
err_rhtbl:
	kfree(br->vlgrp);

	goto out;
}

int nbp_vlan_init(struct net_bridge_port *p)
{
	int ret = -ENOMEM;

	p->vlgrp = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!p->vlgrp)
		goto out;

	ret = rhashtable_init(&p->vlgrp->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&p->vlgrp->vlan_list);
	if (p->br->default_pvid) {
		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	rhashtable_destroy(&p->vlgrp->vlan_hash);
err_rhtbl:
	kfree(p->vlgrp);

	goto out;
}
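
/* Sketch of the default state the two init functions above produce
 * (hypothetical check, not part of the original file): both the bridge and
 * every newly added port end up with VLAN 1 installed as the untagged PVID,
 * matching default_pvid = 1 set in br_vlan_init().
 */
#if 0
static bool example_default_vlan_present(struct net_bridge_port *p)
{
	return p->pvid == p->br->default_pvid &&
	       br_vlan_find(p->vlgrp, p->br->default_pvid) != NULL;
}
#endif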

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vlan = br_vlan_find(port->vlgrp, vid);
	if (vlan) {
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(port->vlgrp, vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}

void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan *vlan;

	ASSERT_RTNL();

	list_for_each_entry(vlan, &port->vlgrp->vlan_list, vlist)
		vlan_vid_del(port->dev, port->br->vlan_proto, vlan->vid);

	__vlan_flush(nbp_vlan_group(port), &port->pvid);
}