/* xref: /openbmc/linux/net/bridge/br_vlan.c (revision 0f963b75) */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"

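/* VLAN entries are kept in a per-vlan-group rhashtable keyed by VID.
 * br_vlan_cmp() is the custom compare callback for that table and
 * br_vlan_rht_params describes the key and hash-head offsets inside
 * struct net_bridge_vlan.
 */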
static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
{
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.locks_mul = 1,
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}

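/* pvid helpers: update the group's pvid under RTNL.  The smp_wmb() is
 * intended to order the store against lockless readers of vg->pvid
 * (presumably pairing with a read barrier on the br_get_pvid() side).
 */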
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid == vid)
		return;

	smp_wmb();
	vg->pvid = vid;
}

static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return;

	smp_wmb();
	vg->pvid = 0;
}

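/* Apply the BRIDGE_VLAN_INFO_PVID and BRIDGE_VLAN_INFO_UNTAGGED bits from
 * @flags to an existing vlan entry and its vlan group.
 */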
static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan_group *vg;

	if (br_vlan_is_master(v))
		vg = v->br->vlgrp;
	else
		vg = v->port->vlgrp;

	if (flags & BRIDGE_VLAN_INFO_PVID)
		__vlan_add_pvid(vg, v->vid);
	else
		__vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
}

static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  u16 vid, u16 flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	/* If driver uses VLAN ndo ops, use 8021q to install vid
	 * on device, otherwise try switchdev ops to install vid.
	 */

	if (ops->ndo_vlan_rx_add_vid) {
		err = vlan_vid_add(dev, br->vlan_proto, vid);
	} else {
		struct switchdev_obj_port_vlan v = {
			.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
			.flags = flags,
			.vid_begin = vid,
			.vid_end = vid,
		};

		err = switchdev_port_obj_add(dev, &v.obj);
		if (err == -EOPNOTSUPP)
			err = 0;
	}

	return err;
}

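/* The per-group vlan_list is kept sorted by VID in ascending order:
 * insertion walks the list backwards to find the slot and links the new
 * entry with RCU so readers can traverse the list locklessly.
 */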
static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	headp = br_vlan_is_master(v) ? &v->br->vlgrp->vlan_list :
				       &v->port->vlgrp->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;
	}
	list_add_rcu(&v->vlist, hpos);
}

static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}

static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  u16 vid)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err = 0;

	/* If driver uses VLAN ndo ops, use 8021q to delete vid
	 * on device, otherwise try switchdev ops to delete vid.
	 */

	if (ops->ndo_vlan_rx_kill_vid) {
		vlan_vid_del(dev, br->vlan_proto, vid);
	} else {
		struct switchdev_obj_port_vlan v = {
			.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
			.vid_begin = vid,
			.vid_end = vid,
		};

		err = switchdev_port_obj_del(dev, &v.obj);
		if (err == -EOPNOTSUPP)
			err = 0;
	}

	return err;
}

/* Returns the master vlan; if it doesn't exist it is created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan *masterv;

	masterv = br_vlan_find(br->vlgrp, vid);
	if (!masterv) {
		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0))
			return NULL;
		masterv = br_vlan_find(br->vlgrp, vid);
		if (WARN_ON(!masterv))
			return NULL;
	}
	atomic_inc(&masterv->refcnt);

	return masterv;
}

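/* Drop a reference on a master (bridge-level) vlan; when the last
 * reference goes away the entry is unlinked from the bridge's hash and
 * list and freed after an RCU grace period.
 */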
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	if (!br_vlan_is_master(masterv))
		return;

	if (atomic_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&masterv->br->vlgrp->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		kfree_rcu(masterv, rcu);
	}
}

/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brvlan flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brvlan flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brvlan flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		vg = br->vlgrp;
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		vg = p->vlgrp;
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v->vid, flags);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			err = br_vlan_add(br, v->vid, flags |
						      BRIDGE_VLAN_INFO_BRENTRY);
			if (err)
				goto out_filt;
		}

		masterv = br_vlan_get_master(br, v->vid);
		if (!masterv)
			goto out_filt;
		v->brvlan = masterv;
	}

	/* Add the dev mac and count the vlan only if it's usable */
	if (br_vlan_should_use(v)) {
		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed insert local address into bridge forwarding table\n");
			goto out_filt;
		}
		vg->num_vlans++;
	}

	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_add_flags(v, flags);
out:
	return err;

out_fdb_insert:
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v->vid);
		if (masterv) {
			br_vlan_put_master(masterv);
			v->brvlan = NULL;
		}
	}

	goto out;
}

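/* Shared delete path for port and bridge vlans, undoing __vlan_add() step
 * by step: clear the pvid, remove the device filter entry (ports only),
 * drop the "usable" accounting, then unlink and free the entry and release
 * the reference on the master vlan.
 */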
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = v->br->vlgrp;
	} else {
		p = v->port;
		vg = v->port->vlgrp;
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	if (masterv != v) {
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		kfree_rcu(v, rcu);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}

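/* Tear down an entire vlan group: delete every vlan on it, then destroy
 * the hash table and free the group itself.
 */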
static void __vlan_flush(struct net_bridge_vlan_group *vlgrp)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vlgrp, vlgrp->pvid);
	list_for_each_entry_safe(vlan, tmp, &vlgrp->vlan_list, vlist)
		__vlan_del(vlan);
	rhashtable_destroy(&vlgrp->vlan_hash);
	kfree(vlgrp);
}

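/* Egress VLAN handling: decide whether the frame leaves tagged or untagged
 * based on the BRIDGE_VLAN_INFO_UNTAGGED flag of the egress vlan, and drop
 * frames whose vlan is not configured on the egress vlan group.
 */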
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has the untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* A vlan entry must be configured at this point.  The only
	 * exception is when the bridge is in promiscuous mode and the
	 * packet is destined for the bridge device; in that case pass
	 * the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		skb->vlan_tci = 0;

out:
	return skb;
}

/* Called under RCU */
static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
			      struct sk_buff *skb, u16 *vid)
{
	const struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on the bridge device and the frame
	 * was sent from a vlan device on the bridge device, it does not have
	 * a HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point we know that skb->vlan_tci has the
			 * VLAN_TAG_PRESENT bit set and its VID field is 0.
			 * Update only the VID field and preserve the PCP
			 * field.
			 */
			skb->vlan_tci |= pvid;

		return true;
	}

	/* Frame had a valid vlan tag.  See if vlan is allowed */
	v = br_vlan_find(vg, *vid);
	if (v && br_vlan_should_use(v))
		return true;
drop:
	kfree_skb(skb);
	return false;
}

bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid)
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	if (!br->vlan_enabled) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(vg, br->vlan_proto, skb, vid);
}

/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
		       const struct sk_buff *skb)
{
	const struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		return true;

	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v))
		return true;

	return false;
}

/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;

	/* If filtering was disabled at input, let it pass. */
	if (!br->vlan_enabled)
		return true;

	vg = p->vlgrp;
	if (!vg || !vg->num_vlans)
		return false;

	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid)
			return false;

		return true;
	}

	if (br_vlan_find(vg, *vid))
		return true;

	return false;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vlan = br_vlan_find(br->vlgrp, vid);
	if (vlan) {
		if (!br_vlan_is_brentry(vlan)) {
			/* Trying to change flags of non-existent bridge vlan */
			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
				return -EINVAL;
			/* It was only kept for port vlans, now make it real */
			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
					    vlan->vid);
			if (ret) {
				br_err(br, "failed insert local address into bridge forwarding table\n");
				return ret;
			}
			atomic_inc(&vlan->refcnt);
			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			br->vlgrp->num_vlans++;
		}
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		atomic_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(br->vlgrp, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);

	return __vlan_del(v);
}

void br_vlan_flush(struct net_bridge *br)
{
	ASSERT_RTNL();

	__vlan_flush(br_vlan_group(br));
}

struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (!vg)
		return NULL;

	return br_vlan_lookup(&vg->vlan_hash, vid);
}

/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br->group_addr_set)
		return;

	spin_lock_bh(&br->lock);
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}

/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}

int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	if (br->vlan_enabled == val)
		return 0;

	br->vlan_enabled = val;
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}

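/* Rough usage sketch (illustrative, not part of the kernel code itself):
 * the bridge's vlan_filtering option typically ends up in these toggle
 * handlers, e.g. via something like
 *
 *	ip link set dev br0 type bridge vlan_filtering 1
 *
 * or the equivalent sysfs attribute.  The locked variant below takes RTNL
 * (restarting the syscall if it cannot) before flipping vlan_enabled and
 * recomputing promiscuity, the group address and the forwarding mask.
 */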
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	if (!rtnl_trylock())
		return restart_syscall();

	__br_vlan_filter_toggle(br, val);
	rtnl_unlock();

	return 0;
}

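/* Switch the bridge's vlan protocol (802.1Q <-> 802.1ad).  Filter entries
 * for the new protocol are added on every port first; only if that fully
 * succeeds is the protocol switched and the old-protocol entries removed,
 * otherwise everything added so far is rolled back.
 */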
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list)
		list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);

	return 0;

err_filt:
	list_for_each_entry_continue_reverse(vlan, &p->vlgrp->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list)
		list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);

	return err;
}

int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
{
	int err;

	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
		return -EPROTONOSUPPORT;

	if (!rtnl_trylock())
		return restart_syscall();

	err = __br_vlan_set_proto(br, htons(val));
	rtnl_unlock();

	return err;
}

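/* Returns true if @vid still looks like the automatically installed
 * default pvid on @vg, i.e. it is the current pvid and the entry is a
 * usable, untagged one; used to avoid clobbering user configuration.
 */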
static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	struct net_bridge_vlan *v;

	if (vid != vg->pvid)
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
}

static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br->vlgrp, pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(p->vlgrp, pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}

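/* Move default_pvid to @pvid on the bridge and on every port, but only
 * where the old default is still in place (user-configured vlans are left
 * alone).  The 'changed' bitmap records which ports were touched, with
 * bit 0 standing for the bridge itself, so a failure can be rolled back.
 */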
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_port *p;
	u16 old_pvid;
	int err = 0;
	unsigned long *changed;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	pvent = br_vlan_find(br->vlgrp, pvid);
	if ((!old_pvid || vlan_default_pvid(br->vlgrp, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		if ((old_pvid &&
		     !vlan_default_pvid(p->vlgrp, old_pvid)) ||
		    br_vlan_find(p->vlgrp, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	kfree(changed);
	return err;

err_port:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED);
		nbp_vlan_delete(p, pvid);
	}

	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY);
		br_vlan_delete(br, pvid);
	}
	goto out;
}

int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (pvid == br->default_pvid)
		goto unlock;

	/* Only allow default pvid change when filtering is disabled */
	if (br->vlan_enabled) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto unlock;
	}
	err = __br_vlan_set_default_pvid(br, pvid);
unlock:
	rtnl_unlock();
	return err;
}

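/* Bridge-level vlan initialization: allocate the vlan group, default the
 * protocol to 802.1Q and install VLAN 1 as the default pvid (untagged,
 * bridge entry).
 */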
int br_vlan_init(struct net_bridge *br)
{
	int ret = -ENOMEM;

	br->vlgrp = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!br->vlgrp)
		goto out;
	ret = rhashtable_init(&br->vlgrp->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&br->vlgrp->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

err_vlan_add:
	rhashtable_destroy(&br->vlgrp->vlan_hash);
err_rhtbl:
	kfree(br->vlgrp);

	goto out;
}

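/* Per-port vlan initialization, mirroring br_vlan_init(): the group is
 * fully set up before being published via p->vlgrp (hence the write
 * barrier), then the bridge's default_pvid is installed on the port.
 */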
int nbp_vlan_init(struct net_bridge_port *p)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&vg->vlan_list);
	/* Make sure everything's committed before publishing vg */
	smp_wmb();
	p->vlgrp = vg;
	if (p->br->default_pvid) {
		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}

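/* Example (illustrative only): a command such as
 *
 *	bridge vlan add dev eth0 vid 10 pvid untagged
 *
 * is expected to reach nbp_vlan_add() below with flags containing
 * BRIDGE_VLAN_INFO_PVID and BRIDGE_VLAN_INFO_UNTAGGED, while a plain
 * "bridge vlan add" passes neither flag.
 */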
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vlan = br_vlan_find(port->vlgrp, vid);
	if (vlan) {
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(port->vlgrp, vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}

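/* Remove all vlans from a port: drop the 8021q filter entries for the
 * bridge's vlan protocol on the underlying device, then flush the port's
 * vlan group.
 */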
void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan *vlan;

	ASSERT_RTNL();

	list_for_each_entry(vlan, &port->vlgrp->vlan_list, vlist)
		vlan_vid_del(port->dev, port->br->vlan_proto, vlan->vid);

	__vlan_flush(nbp_vlan_group(port));
}