xref: /openbmc/linux/net/bridge/br_vlan.c (revision 110e6f26)
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"

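/* rhashtable compare callback: entries are keyed by the 16-bit VLAN id.
 * A zero return means "match", as expected by rhashtable.
 */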
static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
{
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.locks_mul = 1,
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}

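/* Set/clear the group's pvid.  The write barrier orders earlier VLAN setup
 * stores before the pvid store becomes visible to lockless readers
 * (presumably paired with a read-side barrier in br_get_pvid()).
 */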
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid == vid)
		return;

	smp_wmb();
	vg->pvid = vid;
}

static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return;

	smp_wmb();
	vg->pvid = 0;
}

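/* Apply the PVID and UNTAGGED bits from @flags to a VLAN entry and to the
 * vlan group it belongs to (bridge global group or per-port group).
 */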
static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan_group *vg;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	if (flags & BRIDGE_VLAN_INFO_PVID)
		__vlan_add_pvid(vg, v->vid);
	else
		__vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
}

static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  u16 vid, u16 flags)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid_begin = vid,
		.vid_end = vid,
	};
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q add.
	 */
	err = switchdev_port_obj_add(dev, &v.obj);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, vid);
	return err;
}

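/* Insert the entry into the group's vlan_list, which is kept sorted by VID
 * in ascending order; the list is walked backwards to find the insertion
 * point and the add is RCU-safe for lockless readers.
 */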
static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;
	}
	list_add_rcu(&v->vlist, hpos);
}

static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}

static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  u16 vid)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid_begin = vid,
		.vid_end = vid,
	};
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q del.
	 */
	err = switchdev_port_obj_del(dev, &v.obj);
	if (err == -EOPNOTSUPP) {
		vlan_vid_del(dev, br->vlan_proto, vid);
		return 0;
	}
	return err;
}

/* Returns the master vlan; if it doesn't exist it is created. In all cases
 * a reference is taken on the master vlan before returning.
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
	}
	atomic_inc(&masterv->refcnt);

	return masterv;
}

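/* Drop a reference taken by br_vlan_get_master(); when the last reference
 * goes away the master entry is unhashed, unlinked and freed via RCU.
 */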
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (atomic_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		kfree_rcu(masterv, rcu);
	}
}

/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		vg = br_vlan_group(br);
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		vg = nbp_vlan_group(p);
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v->vid, flags);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			err = br_vlan_add(br, v->vid, flags |
						      BRIDGE_VLAN_INFO_BRENTRY);
			if (err)
				goto out_filt;
		}

		masterv = br_vlan_get_master(br, v->vid);
		if (!masterv)
			goto out_filt;
		v->brvlan = masterv;
	}

	/* Add the dev mac and count the vlan only if it's usable */
	if (br_vlan_should_use(v)) {
		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed insert local address into bridge forwarding table\n");
			goto out_filt;
		}
		vg->num_vlans++;
	}

	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_add_flags(v, flags);
out:
	return err;

out_fdb_insert:
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v->vid);
		if (masterv) {
			br_vlan_put_master(masterv);
			v->brvlan = NULL;
		}
	}

	goto out;
}

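/* Shared VLAN delete path for port and bridge entries: clears the pvid if it
 * pointed at this vlan, removes the device filter entry for port vlans, and
 * drops the reference on the master entry (port entries are freed via RCU).
 */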
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	if (masterv != v) {
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		kfree_rcu(v, rcu);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}

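/* Free a vlan group once all entries are gone and no RCU readers remain. */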
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	kfree(vg);
}

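/* Delete every vlan entry in the group and clear its pvid. */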
static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
		__vlan_del(vlan);
}

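/* Egress tag handling: clear the vlan tag for vlans marked untagged, keep it
 * otherwise, and drop frames whose vlan is not configured in the egress vlan
 * group (unless the bridge device itself is receiving in promiscuous mode).
 */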
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has the untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* The vlan entry must be configured at this point.  The only
	 * exception is when the bridge is in promiscuous mode and the
	 * packet is destined for the bridge device; in that case, pass
	 * the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		skb->vlan_tci = 0;

out:
	return skb;
}

/* Called under RCU */
static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
			      struct sk_buff *skb, u16 *vid)
{
	const struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on the bridge device and the frame
	 * was sent from a vlan device on top of the bridge device, it does
	 * not have a HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, we know that skb->vlan_tci had the
			 * VLAN_TAG_PRESENT bit set and its VID field was 0x000.
			 * We update only the VID field and preserve the PCP
			 * field.
			 */
			skb->vlan_tci |= pvid;

		return true;
	}

	/* Frame had a valid vlan tag.  See if vlan is allowed */
	v = br_vlan_find(vg, *vid);
	if (v && br_vlan_should_use(v))
		return true;
drop:
	kfree_skb(skb);
	return false;
}

bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid)
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	if (!br->vlan_enabled) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(vg, br->vlan_proto, skb, vid);
}

/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
		       const struct sk_buff *skb)
{
	const struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		return true;

	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v))
		return true;

	return false;
}

/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;

	/* If filtering was disabled at input, let it pass. */
	if (!br->vlan_enabled)
		return true;

	vg = nbp_vlan_group_rcu(p);
	if (!vg || !vg->num_vlans)
		return false;

	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid)
			return false;

		return true;
	}

	if (br_vlan_find(vg, *vid))
		return true;

	return false;
}
507 
508 /* Must be protected by RTNL.
509  * Must be called with vid in range from 1 to 4094 inclusive.
510  */
511 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
512 {
513 	struct net_bridge_vlan_group *vg;
514 	struct net_bridge_vlan *vlan;
515 	int ret;
516 
517 	ASSERT_RTNL();
518 
519 	vg = br_vlan_group(br);
520 	vlan = br_vlan_find(vg, vid);
521 	if (vlan) {
522 		if (!br_vlan_is_brentry(vlan)) {
523 			/* Trying to change flags of non-existent bridge vlan */
524 			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
525 				return -EINVAL;
526 			/* It was only kept for port vlans, now make it real */
527 			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
528 					    vlan->vid);
529 			if (ret) {
530 				br_err(br, "failed insert local address into bridge forwarding table\n");
531 				return ret;
532 			}
533 			atomic_inc(&vlan->refcnt);
534 			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
535 			vg->num_vlans++;
536 		}
537 		__vlan_add_flags(vlan, flags);
538 		return 0;
539 	}
540 
541 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
542 	if (!vlan)
543 		return -ENOMEM;
544 
545 	vlan->vid = vid;
546 	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
547 	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
548 	vlan->br = br;
549 	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
550 		atomic_set(&vlan->refcnt, 1);
551 	ret = __vlan_add(vlan, flags);
552 	if (ret)
553 		kfree(vlan);
554 
555 	return ret;
556 }
557 
558 /* Must be protected by RTNL.
559  * Must be called with vid in range from 1 to 4094 inclusive.
560  */
561 int br_vlan_delete(struct net_bridge *br, u16 vid)
562 {
563 	struct net_bridge_vlan_group *vg;
564 	struct net_bridge_vlan *v;
565 
566 	ASSERT_RTNL();
567 
568 	vg = br_vlan_group(br);
569 	v = br_vlan_find(vg, vid);
570 	if (!v || !br_vlan_is_brentry(v))
571 		return -ENOENT;
572 
573 	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
574 	br_fdb_delete_by_port(br, NULL, vid, 0);
575 
576 	return __vlan_del(v);
577 }
578 
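/* Tear down the bridge-level vlan group: delete all entries, unpublish the
 * group pointer and free the group after an RCU grace period.
 */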
void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

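/* Look up a vlan entry by VID in the given group; a NULL group is tolerated. */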
struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (!vg)
		return NULL;

	return br_vlan_lookup(&vg->vlan_hash, vid);
}

/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br->group_addr_set)
		return;

	spin_lock_bh(&br->lock);
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}

/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}

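/* Enable or disable VLAN filtering on the bridge.  The new state is offered
 * to switchdev first (EOPNOTSUPP is ignored), then port promiscuity, the
 * group address and the group forward mask are recalculated.
 */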
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br->vlan_enabled == val)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	br->vlan_enabled = val;
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}

int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	int err;

	if (!rtnl_trylock())
		return restart_syscall();

	err = __br_vlan_filter_toggle(br, val);
	rtnl_unlock();

	return err;
}

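/* Switch the bridge VLAN protocol (802.1Q <-> 802.1ad).  VIDs are first added
 * to every port's device filter under the new protocol; only when that
 * succeeds are the old-protocol filter entries removed, so a failure can be
 * unwound without losing the current configuration.
 */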
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}

int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
{
	int err;

	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
		return -EPROTONOSUPPORT;

	if (!rtnl_trylock())
		return restart_syscall();

	err = __br_vlan_set_proto(br, htons(val));
	rtnl_unlock();

	return err;
}

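/* Return true if @vid is the group's pvid and is configured exactly the way
 * the default pvid would be (usable entry with the untagged flag), i.e. it
 * can be replaced without clobbering user configuration.
 */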
static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	struct net_bridge_vlan *v;

	if (vid != vg->pvid)
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
}

static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}

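/* Move the default pvid to @pvid on the bridge and on every port that still
 * carries an untouched default pvid configuration, tracking which ones were
 * changed in a port bitmap so a partial failure can be rolled back.
 */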
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	u16 old_pvid;
	int err = 0;
	unsigned long *changed;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	kfree(changed);
	return err;

err_port:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED);
		nbp_vlan_delete(p, pvid);
	}

	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY);
		br_vlan_delete(br, pvid);
	}
	goto out;
}

int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (pvid == br->default_pvid)
		goto unlock;

	/* Only allow default pvid change when filtering is disabled */
	if (br->vlan_enabled) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto unlock;
	}
	err = __br_vlan_set_default_pvid(br, pvid);
unlock:
	rtnl_unlock();
	return err;
}

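/* Allocate and initialise the bridge-level vlan group and add the default
 * VLAN 1 as a pvid/untagged bridge entry.
 */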
int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	rcu_assign_pointer(br->vlgrp, vg);
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

err_vlan_add:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}

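/* Allocate the per-port vlan group, propagate the bridge's current filtering
 * state to switchdev (EOPNOTSUPP is ignored) and, if the bridge has a
 * default pvid, add it on the port as pvid/untagged.
 */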
int nbp_vlan_init(struct net_bridge_port *p)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = p->br->vlan_enabled,
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	rhashtable_destroy(&vg->vlan_hash);
err_vlan_enabled:
err_rhtbl:
	kfree(vg);

	goto out;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = port->dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid_begin = vid,
		.vid_end = vid,
	};
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		/* Pass the flags to the hardware bridge */
		ret = switchdev_port_obj_add(port->dev, &v.obj);
		if (ret && ret != -EOPNOTSUPP)
			return ret;
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(nbp_vlan_group(port), vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}

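/* Tear down the per-port vlan group: delete all entries, unpublish the group
 * pointer and free the group after an RCU grace period.
 */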
void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}