xref: /openbmc/linux/net/bridge/br_vlan.c (revision 0c7beb2d)
1 #include <linux/kernel.h>
2 #include <linux/netdevice.h>
3 #include <linux/rtnetlink.h>
4 #include <linux/slab.h>
5 #include <net/switchdev.h>
6 
7 #include "br_private.h"
8 #include "br_private_tunnel.h"
9 
10 static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
11 			      const void *ptr)
12 {
13 	const struct net_bridge_vlan *vle = ptr;
14 	u16 vid = *(u16 *)arg->key;
15 
16 	return vle->vid != vid;
17 }
18 
/* rhashtable configuration for the per-bridge and per-port VLAN hash:
 * entries are struct net_bridge_vlan keyed by the 16-bit VLAN id.
 */
static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.max_size = VLAN_N_VID,	/* at most one entry per possible VID */
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};
28 
/* Look up the VLAN entry for @vid in @tbl, or NULL if absent. */
static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}
33 
/* Make @vid the group's pvid. Returns true if the pvid changed,
 * false if it already was @vid.
 */
static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid == vid)
		return false;

	/* publish prior vlan entry writes before lockless readers can
	 * observe the new pvid
	 */
	smp_wmb();
	vg->pvid = vid;

	return true;
}
44 
/* Clear the group's pvid, but only if it currently is @vid.
 * Returns true if it was cleared, false otherwise.
 */
static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return false;

	/* mirror the ordering used by __vlan_add_pvid() */
	smp_wmb();
	vg->pvid = 0;

	return true;
}
55 
56 /* return true if anything changed, false otherwise */
57 static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
58 {
59 	struct net_bridge_vlan_group *vg;
60 	u16 old_flags = v->flags;
61 	bool ret;
62 
63 	if (br_vlan_is_master(v))
64 		vg = br_vlan_group(v->br);
65 	else
66 		vg = nbp_vlan_group(v->port);
67 
68 	if (flags & BRIDGE_VLAN_INFO_PVID)
69 		ret = __vlan_add_pvid(vg, v->vid);
70 	else
71 		ret = __vlan_delete_pvid(vg, v->vid);
72 
73 	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
74 		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
75 	else
76 		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
77 
78 	return ret || !!(old_flags ^ v->flags);
79 }
80 
/* Program @v->vid into the port device's VLAN filter. */
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  struct net_bridge_vlan *v, u16 flags,
			  struct netlink_ext_ack *extack)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q add.
	 */
	err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, v->vid);
	/* record ownership so __vlan_vid_del() knows the 8021q fallback
	 * was not used for this entry
	 */
	v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
	return err;
}
96 
/* Insert @v into its group's vlan_list, which is kept sorted by vid.
 * The list is walked from the tail since vids are usually added in
 * increasing order.
 */
static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;
	}
	/* insert right after the first entry with a smaller vid (or the
	 * head if none) - RCU-safe for concurrent readers
	 */
	list_add_rcu(&v->vlist, hpos);
}
118 
/* RCU-safe removal from the group's sorted vlan_list. */
static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}
123 
/* Remove @v->vid from the port device's VLAN filter; inverse of
 * __vlan_vid_add().
 */
static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  const struct net_bridge_vlan *v)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q del.
	 */
	err = br_switchdev_port_vlan_del(dev, v->vid);
	if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
		vlan_vid_del(dev, br->vlan_proto, v->vid);
	/* -EOPNOTSUPP just means no switchdev offload - not an error */
	return err == -EOPNOTSUPP ? 0 : err;
}
137 
/* Returns a master vlan, if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *
br_vlan_get_master(struct net_bridge *br, u16 vid,
		   struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		bool changed;

		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0, &changed, extack))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
		/* freshly created non-brentry master: this caller holds the
		 * first and only reference
		 */
		refcount_set(&masterv->refcnt, 1);
		return masterv;
	}
	refcount_inc(&masterv->refcnt);

	return masterv;
}
166 
/* RCU callback freeing a master (bridge-level) vlan entry together with
 * its percpu stats, after all readers are done with it.
 */
static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	/* master entries always own their stats (see br_vlan_add) */
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}
177 
/* Drop a reference on @masterv taken via br_vlan_get_master(); tears the
 * entry down via RCU when the last reference goes away. No-op for
 * non-master entries.
 */
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (refcount_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}
193 
/* RCU callback freeing a per-port vlan entry. Port entries normally
 * share the master vlan's stats; they own their own only when per-port
 * stats were enabled at creation time.
 */
static void nbp_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(br_vlan_is_master(v));
	/* if we had per-port stats configured then free them here */
	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
		free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}
206 
/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 *
 * On failure every intermediate step (device filter, master refcount,
 * per-port stats, local fdb entry) is unwound in reverse order.
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
		      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		vg = br_vlan_group(br);
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		vg = nbp_vlan_group(p);
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v, flags, extack);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			bool changed;

			err = br_vlan_add(br, v->vid,
					  flags | BRIDGE_VLAN_INFO_BRENTRY,
					  &changed, extack);
			if (err)
				goto out_filt;
		}

		/* take a reference on (and possibly create) the global ctx */
		masterv = br_vlan_get_master(br, v->vid, extack);
		if (!masterv)
			goto out_filt;
		v->brvlan = masterv;
		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
			v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
			if (!v->stats) {
				err = -ENOMEM;
				goto out_filt;
			}
			v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
		} else {
			/* share the master's percpu counters */
			v->stats = masterv->stats;
		}
	} else {
		err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
		if (err && err != -EOPNOTSUPP)
			goto out;
	}

	/* Add the dev mac and count the vlan only if it's usable */
	if (br_vlan_should_use(v)) {
		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed insert local address into bridge forwarding table\n");
			goto out_filt;
		}
		vg->num_vlans++;
	}

	/* publish the entry; lookups can find it from here on */
	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_add_flags(v, flags);
out:
	return err;

out_fdb_insert:
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v);
		if (masterv) {
			/* only free stats if they're per-port owned */
			if (v->stats && masterv->stats != v->stats)
				free_percpu(v->stats);
			v->stats = NULL;

			br_vlan_put_master(masterv);
			v->brvlan = NULL;
		}
	} else {
		br_switchdev_port_vlan_del(dev, v->vid);
	}

	goto out;
}
322 
/* Shared VLAN delete for both port and bridge entries; inverse of
 * __vlan_add(). Port entries are freed here via RCU, master entries are
 * freed when their last reference is put in br_vlan_put_master().
 */
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v);
		if (err)
			goto out;
	} else {
		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
		if (err && err != -EOPNOTSUPP)
			goto out;
		err = 0;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	/* port entry: unlink and schedule its RCU free; a master entry's
	 * teardown is driven purely by its refcount below
	 */
	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		call_rcu(&v->rcu, nbp_vlan_rcu_free);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}
367 
/* Free an emptied vlan group; callers must have flushed all entries and
 * waited for RCU readers first (see br_vlan_flush/nbp_vlan_flush).
 */
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}
375 
/* Delete every vlan entry in the group (and clear its pvid). */
static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vg, vg->pvid);
	/* _safe variant: __vlan_del unlinks entries as we walk */
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
		__vlan_del(vlan);
}
384 
/* Egress VLAN handling: strip the tag for untagged-egress vlans, account
 * tx stats, and apply tunnel mapping. Returns the (possibly modified)
 * skb, or NULL if it was dropped/consumed.
 */
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point.  The
	 * only exception is the bridge is set in promisc mode and the
	 * packet is destined for the bridge device.  In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		__vlan_hwaccel_clear_tag(skb);

	/* tunnel egress mapping may consume the skb on failure */
	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}
436 
/* Called under RCU */
/* Ingress VLAN filtering core: normalize the frame's tag, resolve
 * untagged/priority-tagged traffic to the pvid, verify the vid is
 * configured and usable on this group, and account rx stats.
 * Returns false (and frees the skb) if the frame must be dropped.
 */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			/* foreign-proto tag was pushed back into the payload;
			 * treat the frame as untagged from here on
			 */
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, we know that skb->vlan_tci VID
			 * field was 0.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if stats are disabled we can avoid the lookup */
		if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED))
			return true;
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	return true;

drop:
	kfree_skb(skb);
	return false;
}
527 
528 bool br_allowed_ingress(const struct net_bridge *br,
529 			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
530 			u16 *vid)
531 {
532 	/* If VLAN filtering is disabled on the bridge, all packets are
533 	 * permitted.
534 	 */
535 	if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
536 		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
537 		return true;
538 	}
539 
540 	return __allowed_ingress(br, vg, skb, vid);
541 }
542 
543 /* Called under RCU. */
544 bool br_allowed_egress(struct net_bridge_vlan_group *vg,
545 		       const struct sk_buff *skb)
546 {
547 	const struct net_bridge_vlan *v;
548 	u16 vid;
549 
550 	/* If this packet was not filtered at input, let it pass */
551 	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
552 		return true;
553 
554 	br_vlan_get_tag(skb, &vid);
555 	v = br_vlan_find(vg, vid);
556 	if (v && br_vlan_should_use(v))
557 		return true;
558 
559 	return false;
560 }
561 
562 /* Called under RCU */
563 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
564 {
565 	struct net_bridge_vlan_group *vg;
566 	struct net_bridge *br = p->br;
567 
568 	/* If filtering was disabled at input, let it pass. */
569 	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
570 		return true;
571 
572 	vg = nbp_vlan_group_rcu(p);
573 	if (!vg || !vg->num_vlans)
574 		return false;
575 
576 	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
577 		*vid = 0;
578 
579 	if (!*vid) {
580 		*vid = br_get_pvid(vg);
581 		if (!*vid)
582 			return false;
583 
584 		return true;
585 	}
586 
587 	if (br_vlan_find(vg, *vid))
588 		return true;
589 
590 	return false;
591 }
592 
/* Update an already-existing bridge vlan entry: push the new flags to the
 * hardware, promote a master-only entry to a real brentry when requested,
 * and apply the flag changes. *changed reflects whether anything changed.
 */
static int br_vlan_add_existing(struct net_bridge *br,
				struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *vlan,
				u16 flags, bool *changed,
				struct netlink_ext_ack *extack)
{
	int err;

	err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	if (!br_vlan_is_brentry(vlan)) {
		/* Trying to change flags of non-existent bridge vlan */
		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
			err = -EINVAL;
			goto err_flags;
		}
		/* It was only kept for port vlans, now make it real */
		err = br_fdb_insert(br, NULL, br->dev->dev_addr,
				    vlan->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto err_fdb_insert;
		}

		/* the bridge itself now holds a reference too */
		refcount_inc(&vlan->refcnt);
		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans++;
		*changed = true;
	}

	if (__vlan_add_flags(vlan, flags))
		*changed = true;

	return 0;

err_fdb_insert:
err_flags:
	/* undo the switchdev add done above */
	br_switchdev_port_vlan_del(br->dev, vlan->vid);
	return err;
}
635 
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
		struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan)
		return br_vlan_add_existing(br, vg, vlan, flags, changed,
					    extack);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	/* master entries always own their percpu stats */
	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	/* the pvid bit is applied later by __vlan_add_flags() */
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		refcount_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags, extack);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	} else {
		*changed = true;
	}

	return ret;
}
681 
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Returns -ENOENT unless @vid exists as a real bridge entry.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	/* drop the bridge's local fdb entries for this vid first */
	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	vlan_tunnel_info_del(vg, v);

	return __vlan_del(v);
}
704 
/* Tear down the bridge's whole vlan group on bridge destruction. */
void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(vg);
	/* unpublish, wait for readers, then free the group itself */
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
717 
718 struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
719 {
720 	if (!vg)
721 		return NULL;
722 
723 	return br_vlan_lookup(&vg->vlan_hash, vid);
724 }
725 
/* Must be protected by RTNL. */
/* Pick the STP group address based on the vlan protocol, unless the user
 * configured one explicitly (BROPT_GROUP_ADDR_SET).
 */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
		return;

	spin_lock_bh(&br->lock);
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}
743 
/* Must be protected by RTNL. */
/* Recompute which link-local group addresses must always be forwarded,
 * depending on whether we act as a provider (802.1ad) bridge.
 */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}
754 
/* Enable/disable vlan filtering on the bridge and propagate the setting
 * to switchdev hardware and the derived state (promisc mode, group
 * address, forward mask). No-op if the setting does not change.
 */
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}
779 
/* Sysfs/netlink entry point for toggling vlan filtering. */
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	return __br_vlan_filter_toggle(br, val);
}
784 
785 bool br_vlan_enabled(const struct net_device *dev)
786 {
787 	struct net_bridge *br = netdev_priv(dev);
788 
789 	return br_opt_get(br, BROPT_VLAN_ENABLED);
790 }
791 EXPORT_SYMBOL_GPL(br_vlan_enabled);
792 
/* Switch the bridge's vlan protocol (802.1Q <-> 802.1ad): first program
 * every port's device filter for the new proto, then flip the proto and
 * remove the old-proto filters. On failure, roll back the filters added
 * so far (partial inner loop first, then the completed outer ports).
 */
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	/* unwind the partially-converted port, then all earlier ports */
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}
841 
842 int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
843 {
844 	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
845 		return -EPROTONOSUPPORT;
846 
847 	return __br_vlan_set_proto(br, htons(val));
848 }
849 
850 int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
851 {
852 	switch (val) {
853 	case 0:
854 	case 1:
855 		br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
856 		break;
857 	default:
858 		return -EINVAL;
859 	}
860 
861 	return 0;
862 }
863 
864 int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
865 {
866 	struct net_bridge_port *p;
867 
868 	/* allow to change the option if there are no port vlans configured */
869 	list_for_each_entry(p, &br->port_list, list) {
870 		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
871 
872 		if (vg->num_vlans)
873 			return -EBUSY;
874 	}
875 
876 	switch (val) {
877 	case 0:
878 	case 1:
879 		br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
880 		break;
881 	default:
882 		return -EINVAL;
883 	}
884 
885 	return 0;
886 }
887 
888 static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
889 {
890 	struct net_bridge_vlan *v;
891 
892 	if (vid != vg->pvid)
893 		return false;
894 
895 	v = br_vlan_lookup(&vg->vlan_hash, vid);
896 	if (v && br_vlan_should_use(v) &&
897 	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
898 		return true;
899 
900 	return false;
901 }
902 
/* Remove the default-pvid vlan from the bridge and from every port that
 * still carries the unmodified default configuration, then disable the
 * default pvid altogether.
 */
static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}
921 
/* Change the bridge's default pvid to @pvid, moving it on the bridge
 * itself (bit 0 of @changed) and on each port (bit port_no) that still
 * carries the unmodified default config. On a port failure everything
 * recorded in @changed is rolled back to @old_pvid.
 */
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
			       struct netlink_ext_ack *extack)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	unsigned long *changed;
	bool vlchange;
	u16 old_pvid;
	int err = 0;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	/* bitmap of ports (and the bridge, bit 0) we actually changed */
	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY,
				  &vlchange, extack);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &vlchange, extack);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	bitmap_free(changed);
	return err;

err_port:
	/* restore old_pvid on every port we already switched over */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED,
				     &vlchange, NULL);
		nbp_vlan_delete(p, pvid);
	}

	/* and on the bridge itself, if it was changed */
	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY,
				    &vlchange, NULL);
		br_vlan_delete(br, pvid);
	}
	goto out;
}
1012 
1013 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
1014 {
1015 	u16 pvid = val;
1016 	int err = 0;
1017 
1018 	if (val >= VLAN_VID_MASK)
1019 		return -EINVAL;
1020 
1021 	if (pvid == br->default_pvid)
1022 		goto out;
1023 
1024 	/* Only allow default pvid change when filtering is disabled */
1025 	if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
1026 		pr_info_once("Please disable vlan filtering to change default_pvid\n");
1027 		err = -EPERM;
1028 		goto out;
1029 	}
1030 	err = __br_vlan_set_default_pvid(br, pvid, NULL);
1031 out:
1032 	return err;
1033 }
1034 
/* Initialize the bridge's vlan group: hash table, tunnel state, defaults
 * (802.1Q, pvid 1) and the initial vlan 1 entry. Unwinds in reverse on
 * failure.
 */
int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;
	bool changed;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	rcu_assign_pointer(br->vlgrp, vg);
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

err_vlan_add:
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}
1072 
/* Initialize a port's vlan group when it joins the bridge: propagate the
 * bridge's filtering setting to switchdev, set up the hash/tunnel state
 * and add the bridge's default pvid on the port.
 */
int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		bool changed;

		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &changed, extack);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	/* the group was already published - unpublish and wait for readers
	 * before tearing it down
	 */
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);

	goto out;
}
1125 
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
		 bool *changed, struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		/* Pass the flags to the hardware bridge */
		ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
		if (ret && ret != -EOPNOTSUPP)
			return ret;
		*changed = __vlan_add_flags(vlan, flags);

		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	/* __vlan_add wires up stats/master refs and publishes the entry */
	ret = __vlan_add(vlan, flags, extack);
	if (ret)
		kfree(vlan);
	else
		*changed = true;

	return ret;
}
1164 
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(nbp_vlan_group(port), vid);
	if (!v)
		return -ENOENT;
	/* purge the port's fdb entries for this vid before removing it */
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}
1182 
/* Tear down a port's whole vlan group when it leaves the bridge. */
void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(vg);
	/* unpublish, wait for readers, then free the group itself */
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
1195 
/* Sum @v's percpu counters into @stats, using the u64_stats seqcount to
 * get a consistent snapshot from each CPU.
 */
void br_vlan_get_stats(const struct net_bridge_vlan *v,
		       struct br_vlan_stats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		struct br_vlan_stats *cpu_stats;
		unsigned int start;

		cpu_stats = per_cpu_ptr(v->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rxpackets = cpu_stats->rx_packets;
			rxbytes = cpu_stats->rx_bytes;
			txbytes = cpu_stats->tx_bytes;
			txpackets = cpu_stats->tx_packets;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rxpackets;
		stats->rx_bytes += rxbytes;
		stats->tx_bytes += txbytes;
		stats->tx_packets += txpackets;
	}
}
1222 
/* Exported: report the pvid of @dev, which may be a bridge port or the
 * bridge device itself. Returns -EINVAL for unrelated devices.
 */
int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
1241 
/* Exported: report vid/flags of a configured vlan on @dev (bridge port
 * or bridge device). Returns -EINVAL for unrelated devices, -ENOENT if
 * the vlan is not configured.
 */
int br_vlan_get_info(const struct net_device *dev, u16 vid,
		     struct bridge_vlan_info *p_vinfo)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	v = br_vlan_find(vg, vid);
	if (!v)
		return -ENOENT;

	p_vinfo->vid = vid;
	p_vinfo->flags = v->flags;
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info);
1267