xref: /openbmc/linux/net/bridge/br_vlan.c (revision 2c6467d2)
1 #include <linux/kernel.h>
2 #include <linux/netdevice.h>
3 #include <linux/rtnetlink.h>
4 #include <linux/slab.h>
5 #include <net/switchdev.h>
6 
7 #include "br_private.h"
8 #include "br_private_tunnel.h"
9 
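/* rhashtable compare callback: entries are keyed by VID, returns 0 on match */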
10 static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
11 			      const void *ptr)
12 {
13 	const struct net_bridge_vlan *vle = ptr;
14 	u16 vid = *(u16 *)arg->key;
15 
16 	return vle->vid != vid;
17 }
18 
19 static const struct rhashtable_params br_vlan_rht_params = {
20 	.head_offset = offsetof(struct net_bridge_vlan, vnode),
21 	.key_offset = offsetof(struct net_bridge_vlan, vid),
22 	.key_len = sizeof(u16),
23 	.nelem_hint = 3,
24 	.locks_mul = 1,
25 	.max_size = VLAN_N_VID,
26 	.obj_cmpfn = br_vlan_cmp,
27 	.automatic_shrinking = true,
28 };
29 
30 static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
31 {
32 	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
33 }
34 
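/* vg->pvid is read locklessly on the RX fast path via br_get_pvid(); the
 * smp_wmb() below is intended to pair with the read side there.
 */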
35 static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
36 {
37 	if (vg->pvid == vid)
38 		return false;
39 
40 	smp_wmb();
41 	vg->pvid = vid;
42 
43 	return true;
44 }
45 
46 static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
47 {
48 	if (vg->pvid != vid)
49 		return false;
50 
51 	smp_wmb();
52 	vg->pvid = 0;
53 
54 	return true;
55 }
56 
57 /* return true if anything changed, false otherwise */
58 static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
59 {
60 	struct net_bridge_vlan_group *vg;
61 	u16 old_flags = v->flags;
62 	bool ret;
63 
64 	if (br_vlan_is_master(v))
65 		vg = br_vlan_group(v->br);
66 	else
67 		vg = nbp_vlan_group(v->port);
68 
69 	if (flags & BRIDGE_VLAN_INFO_PVID)
70 		ret = __vlan_add_pvid(vg, v->vid);
71 	else
72 		ret = __vlan_delete_pvid(vg, v->vid);
73 
74 	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
75 		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
76 	else
77 		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
78 
79 	return ret || !!(old_flags ^ v->flags);
80 }
81 
82 static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
83 			  u16 vid, u16 flags, struct netlink_ext_ack *extack)
84 {
85 	int err;
86 
87 	/* Try switchdev op first. In case it is not supported, fall back to
88 	 * 8021q add.
89 	 */
90 	err = br_switchdev_port_vlan_add(dev, vid, flags, extack);
91 	if (err == -EOPNOTSUPP)
92 		return vlan_vid_add(dev, br->vlan_proto, vid);
93 	return err;
94 }
95 
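/* Link the vlan into its group's list, keeping the list sorted by VID
 * (walk backwards and insert after the first entry with vid <= v->vid).
 */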
96 static void __vlan_add_list(struct net_bridge_vlan *v)
97 {
98 	struct net_bridge_vlan_group *vg;
99 	struct list_head *headp, *hpos;
100 	struct net_bridge_vlan *vent;
101 
102 	if (br_vlan_is_master(v))
103 		vg = br_vlan_group(v->br);
104 	else
105 		vg = nbp_vlan_group(v->port);
106 
107 	headp = &vg->vlan_list;
108 	list_for_each_prev(hpos, headp) {
109 		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
110 		if (v->vid < vent->vid)
111 			continue;
112 		else
113 			break;
114 	}
115 	list_add_rcu(&v->vlist, hpos);
116 }
117 
118 static void __vlan_del_list(struct net_bridge_vlan *v)
119 {
120 	list_del_rcu(&v->vlist);
121 }
122 
123 static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
124 			  u16 vid)
125 {
126 	int err;
127 
128 	/* Try switchdev op first. In case it is not supported, fall back to
129 	 * 8021q del.
130 	 */
131 	err = br_switchdev_port_vlan_del(dev, vid);
132 	if (err == -EOPNOTSUPP) {
133 		vlan_vid_del(dev, br->vlan_proto, vid);
134 		return 0;
135 	}
136 	return err;
137 }
138 
139 /* Returns the master vlan; if it doesn't exist it gets created. In all cases
140  * a reference is taken to the master vlan before returning.
141  */
142 static struct net_bridge_vlan *
143 br_vlan_get_master(struct net_bridge *br, u16 vid,
144 		   struct netlink_ext_ack *extack)
145 {
146 	struct net_bridge_vlan_group *vg;
147 	struct net_bridge_vlan *masterv;
148 
149 	vg = br_vlan_group(br);
150 	masterv = br_vlan_find(vg, vid);
151 	if (!masterv) {
152 		bool changed;
153 
154 		/* missing global ctx, create it now */
155 		if (br_vlan_add(br, vid, 0, &changed, extack))
156 			return NULL;
157 		masterv = br_vlan_find(vg, vid);
158 		if (WARN_ON(!masterv))
159 			return NULL;
160 		refcount_set(&masterv->refcnt, 1);
161 		return masterv;
162 	}
163 	refcount_inc(&masterv->refcnt);
164 
165 	return masterv;
166 }
167 
168 static void br_master_vlan_rcu_free(struct rcu_head *rcu)
169 {
170 	struct net_bridge_vlan *v;
171 
172 	v = container_of(rcu, struct net_bridge_vlan, rcu);
173 	WARN_ON(!br_vlan_is_master(v));
174 	free_percpu(v->stats);
175 	v->stats = NULL;
176 	kfree(v);
177 }
178 
179 static void br_vlan_put_master(struct net_bridge_vlan *masterv)
180 {
181 	struct net_bridge_vlan_group *vg;
182 
183 	if (!br_vlan_is_master(masterv))
184 		return;
185 
186 	vg = br_vlan_group(masterv->br);
187 	if (refcount_dec_and_test(&masterv->refcnt)) {
188 		rhashtable_remove_fast(&vg->vlan_hash,
189 				       &masterv->vnode, br_vlan_rht_params);
190 		__vlan_del_list(masterv);
191 		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
192 	}
193 }
194 
195 static void nbp_vlan_rcu_free(struct rcu_head *rcu)
196 {
197 	struct net_bridge_vlan *v;
198 
199 	v = container_of(rcu, struct net_bridge_vlan, rcu);
200 	WARN_ON(br_vlan_is_master(v));
201 	/* if we had per-port stats configured then free them here */
202 	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
203 		free_percpu(v->stats);
204 	v->stats = NULL;
205 	kfree(v);
206 }
207 
208 /* This is the shared VLAN add function which works for both ports and bridge
209  * devices. There are four possible calls to this function in terms of the
210  * vlan entry type:
211  * 1. vlan is being added on a port (no master flags, global entry exists)
212  * 2. vlan is being added on a bridge (both master and brentry flags)
213  * 3. vlan is being added on a port, but a global entry didn't exist which
214  *    is being created right now (master flag set, brentry flag unset), the
215  *    global entry is used for global per-vlan features, but not for filtering
216  * 4. same as 3 but with both master and brentry flags set so the entry
217  *    will be used for filtering in both the port and the bridge
218  */
219 static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
220 		      struct netlink_ext_ack *extack)
221 {
222 	struct net_bridge_vlan *masterv = NULL;
223 	struct net_bridge_port *p = NULL;
224 	struct net_bridge_vlan_group *vg;
225 	struct net_device *dev;
226 	struct net_bridge *br;
227 	int err;
228 
229 	if (br_vlan_is_master(v)) {
230 		br = v->br;
231 		dev = br->dev;
232 		vg = br_vlan_group(br);
233 	} else {
234 		p = v->port;
235 		br = p->br;
236 		dev = p->dev;
237 		vg = nbp_vlan_group(p);
238 	}
239 
240 	if (p) {
241 		/* Add VLAN to the device filter if it is supported.
242 		 * This ensures tagged traffic enters the bridge when
243 		 * promiscuous mode is disabled by br_manage_promisc().
244 		 */
245 		err = __vlan_vid_add(dev, br, v->vid, flags, extack);
246 		if (err)
247 			goto out;
248 
249 		/* need to work on the master vlan too */
250 		if (flags & BRIDGE_VLAN_INFO_MASTER) {
251 			bool changed;
252 
253 			err = br_vlan_add(br, v->vid,
254 					  flags | BRIDGE_VLAN_INFO_BRENTRY,
255 					  &changed, extack);
256 			if (err)
257 				goto out_filt;
258 		}
259 
260 		masterv = br_vlan_get_master(br, v->vid, extack);
261 		if (!masterv)
262 			goto out_filt;
263 		v->brvlan = masterv;
264 		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
265 			v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
266 			if (!v->stats) {
267 				err = -ENOMEM;
268 				goto out_filt;
269 			}
270 			v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
271 		} else {
272 			v->stats = masterv->stats;
273 		}
274 	} else {
275 		err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
276 		if (err && err != -EOPNOTSUPP)
277 			goto out;
278 	}
279 
280 	/* Add the dev mac and count the vlan only if it's usable */
281 	if (br_vlan_should_use(v)) {
282 		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
283 		if (err) {
284 			br_err(br, "failed to insert local address into bridge forwarding table\n");
285 			goto out_filt;
286 		}
287 		vg->num_vlans++;
288 	}
289 
290 	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
291 					    br_vlan_rht_params);
292 	if (err)
293 		goto out_fdb_insert;
294 
295 	__vlan_add_list(v);
296 	__vlan_add_flags(v, flags);
297 out:
298 	return err;
299 
300 out_fdb_insert:
301 	if (br_vlan_should_use(v)) {
302 		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
303 		vg->num_vlans--;
304 	}
305 
306 out_filt:
307 	if (p) {
308 		__vlan_vid_del(dev, br, v->vid);
309 		if (masterv) {
310 			if (v->stats && masterv->stats != v->stats)
311 				free_percpu(v->stats);
312 			v->stats = NULL;
313 
314 			br_vlan_put_master(masterv);
315 			v->brvlan = NULL;
316 		}
317 	} else {
318 		br_switchdev_port_vlan_del(dev, v->vid);
319 	}
320 
321 	goto out;
322 }
323 
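/* Undo __vlan_add(): remove the vlan from the device/switchdev filter, drop
 * the brentry accounting, free port entries via RCU and release the
 * reference on the master vlan.
 */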
324 static int __vlan_del(struct net_bridge_vlan *v)
325 {
326 	struct net_bridge_vlan *masterv = v;
327 	struct net_bridge_vlan_group *vg;
328 	struct net_bridge_port *p = NULL;
329 	int err = 0;
330 
331 	if (br_vlan_is_master(v)) {
332 		vg = br_vlan_group(v->br);
333 	} else {
334 		p = v->port;
335 		vg = nbp_vlan_group(v->port);
336 		masterv = v->brvlan;
337 	}
338 
339 	__vlan_delete_pvid(vg, v->vid);
340 	if (p) {
341 		err = __vlan_vid_del(p->dev, p->br, v->vid);
342 		if (err)
343 			goto out;
344 	} else {
345 		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
346 		if (err && err != -EOPNOTSUPP)
347 			goto out;
348 		err = 0;
349 	}
350 
351 	if (br_vlan_should_use(v)) {
352 		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
353 		vg->num_vlans--;
354 	}
355 
356 	if (masterv != v) {
357 		vlan_tunnel_info_del(vg, v);
358 		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
359 				       br_vlan_rht_params);
360 		__vlan_del_list(v);
361 		call_rcu(&v->rcu, nbp_vlan_rcu_free);
362 	}
363 
364 	br_vlan_put_master(masterv);
365 out:
366 	return err;
367 }
368 
369 static void __vlan_group_free(struct net_bridge_vlan_group *vg)
370 {
371 	WARN_ON(!list_empty(&vg->vlan_list));
372 	rhashtable_destroy(&vg->vlan_hash);
373 	vlan_tunnel_deinit(vg);
374 	kfree(vg);
375 }
376 
377 static void __vlan_flush(struct net_bridge_vlan_group *vg)
378 {
379 	struct net_bridge_vlan *vlan, *tmp;
380 
381 	__vlan_delete_pvid(vg, vg->pvid);
382 	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
383 		__vlan_del(vlan);
384 }
385 
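/* Egress vlan handling: update per-vlan TX stats (when enabled), strip the
 * tag for vlans marked untagged and run the egress vlan tunnel hook for
 * tunnel-enabled ports. May consume the skb and return NULL.
 */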
386 struct sk_buff *br_handle_vlan(struct net_bridge *br,
387 			       const struct net_bridge_port *p,
388 			       struct net_bridge_vlan_group *vg,
389 			       struct sk_buff *skb)
390 {
391 	struct br_vlan_stats *stats;
392 	struct net_bridge_vlan *v;
393 	u16 vid;
394 
395 	/* If this packet was not filtered at input, let it pass */
396 	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
397 		goto out;
398 
399 	/* At this point, we know that the frame was filtered and contains
400 	 * a valid vlan id.  If the vlan id has the untagged flag set,
401 	 * send untagged; otherwise, send tagged.
402 	 */
403 	br_vlan_get_tag(skb, &vid);
404 	v = br_vlan_find(vg, vid);
405 	/* Vlan entry must be configured at this point.  The
406 	 * only exception is when the bridge is set in promisc mode and the
407 	 * packet is destined for the bridge device.  In this case
408 	 * pass the packet as is.
409 	 */
410 	if (!v || !br_vlan_should_use(v)) {
411 		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
412 			goto out;
413 		} else {
414 			kfree_skb(skb);
415 			return NULL;
416 		}
417 	}
418 	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
419 		stats = this_cpu_ptr(v->stats);
420 		u64_stats_update_begin(&stats->syncp);
421 		stats->tx_bytes += skb->len;
422 		stats->tx_packets++;
423 		u64_stats_update_end(&stats->syncp);
424 	}
425 
426 	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
427 		__vlan_hwaccel_clear_tag(skb);
428 
429 	if (p && (p->flags & BR_VLAN_TUNNEL) &&
430 	    br_handle_egress_vlan_tunnel(skb, v)) {
431 		kfree_skb(skb);
432 		return NULL;
433 	}
434 out:
435 	return skb;
436 }
437 
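/* Ingress vlan filtering: normalize the vlan tag, map untagged and
 * priority-tagged frames to the pvid, drop frames without a usable vlan
 * entry and account RX stats. On success *vid holds the resolved vlan.
 */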
438 /* Called under RCU */
439 static bool __allowed_ingress(const struct net_bridge *br,
440 			      struct net_bridge_vlan_group *vg,
441 			      struct sk_buff *skb, u16 *vid)
442 {
443 	struct br_vlan_stats *stats;
444 	struct net_bridge_vlan *v;
445 	bool tagged;
446 
447 	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
448 	/* If vlan tx offload is disabled on the bridge device and the frame was
449 	 * sent from a vlan device on top of the bridge device, it does not have
450 	 * an HW accelerated vlan tag.
451 	 */
452 	if (unlikely(!skb_vlan_tag_present(skb) &&
453 		     skb->protocol == br->vlan_proto)) {
454 		skb = skb_vlan_untag(skb);
455 		if (unlikely(!skb))
456 			return false;
457 	}
458 
459 	if (!br_vlan_get_tag(skb, vid)) {
460 		/* Tagged frame */
461 		if (skb->vlan_proto != br->vlan_proto) {
462 			/* Protocol-mismatch, empty out vlan_tci for new tag */
463 			skb_push(skb, ETH_HLEN);
464 			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
465 							skb_vlan_tag_get(skb));
466 			if (unlikely(!skb))
467 				return false;
468 
469 			skb_pull(skb, ETH_HLEN);
470 			skb_reset_mac_len(skb);
471 			*vid = 0;
472 			tagged = false;
473 		} else {
474 			tagged = true;
475 		}
476 	} else {
477 		/* Untagged frame */
478 		tagged = false;
479 	}
480 
481 	if (!*vid) {
482 		u16 pvid = br_get_pvid(vg);
483 
484 		/* Frame had a tag with VID 0 or did not have a tag.
485 		 * See if pvid is set on this port.  That tells us which
486 		 * vlan untagged or priority-tagged traffic belongs to.
487 		 */
488 		if (!pvid)
489 			goto drop;
490 
491 		/* PVID is set on this port.  Any untagged or priority-tagged
492 		 * ingress frame is considered to belong to this vlan.
493 		 */
494 		*vid = pvid;
495 		if (likely(!tagged))
496 			/* Untagged Frame. */
497 			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
498 		else
499 			/* Priority-tagged Frame.
500 			 * At this point, we know that skb->vlan_tci VID
501 			 * field was 0.
502 			 * We update only VID field and preserve PCP field.
503 			 */
504 			skb->vlan_tci |= pvid;
505 
506 		/* if stats are disabled we can avoid the lookup */
507 		if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED))
508 			return true;
509 	}
510 	v = br_vlan_find(vg, *vid);
511 	if (!v || !br_vlan_should_use(v))
512 		goto drop;
513 
514 	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
515 		stats = this_cpu_ptr(v->stats);
516 		u64_stats_update_begin(&stats->syncp);
517 		stats->rx_bytes += skb->len;
518 		stats->rx_packets++;
519 		u64_stats_update_end(&stats->syncp);
520 	}
521 
522 	return true;
523 
524 drop:
525 	kfree_skb(skb);
526 	return false;
527 }
528 
529 bool br_allowed_ingress(const struct net_bridge *br,
530 			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
531 			u16 *vid)
532 {
533 	/* If VLAN filtering is disabled on the bridge, all packets are
534 	 * permitted.
535 	 */
536 	if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
537 		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
538 		return true;
539 	}
540 
541 	return __allowed_ingress(br, vg, skb, vid);
542 }
543 
544 /* Called under RCU. */
545 bool br_allowed_egress(struct net_bridge_vlan_group *vg,
546 		       const struct sk_buff *skb)
547 {
548 	const struct net_bridge_vlan *v;
549 	u16 vid;
550 
551 	/* If this packet was not filtered at input, let it pass */
552 	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
553 		return true;
554 
555 	br_vlan_get_tag(skb, &vid);
556 	v = br_vlan_find(vg, vid);
557 	if (v && br_vlan_should_use(v))
558 		return true;
559 
560 	return false;
561 }
562 
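/* Used on the learning path: resolve the vlan the skb would belong to
 * (falling back to the port's pvid for untagged frames) without modifying
 * the skb, and report whether source learning is allowed.
 */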
563 /* Called under RCU */
564 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
565 {
566 	struct net_bridge_vlan_group *vg;
567 	struct net_bridge *br = p->br;
568 
569 	/* If filtering was disabled at input, let it pass. */
570 	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
571 		return true;
572 
573 	vg = nbp_vlan_group_rcu(p);
574 	if (!vg || !vg->num_vlans)
575 		return false;
576 
577 	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
578 		*vid = 0;
579 
580 	if (!*vid) {
581 		*vid = br_get_pvid(vg);
582 		if (!*vid)
583 			return false;
584 
585 		return true;
586 	}
587 
588 	if (br_vlan_find(vg, *vid))
589 		return true;
590 
591 	return false;
592 }
593 
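/* Update an already existing bridge vlan entry: notify switchdev, promote a
 * port-only (non-brentry) context to a real bridge entry when requested and
 * apply the new flags.
 */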
594 static int br_vlan_add_existing(struct net_bridge *br,
595 				struct net_bridge_vlan_group *vg,
596 				struct net_bridge_vlan *vlan,
597 				u16 flags, bool *changed,
598 				struct netlink_ext_ack *extack)
599 {
600 	int err;
601 
602 	err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
603 	if (err && err != -EOPNOTSUPP)
604 		return err;
605 
606 	if (!br_vlan_is_brentry(vlan)) {
607 		/* Trying to change flags of non-existent bridge vlan */
608 		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
609 			err = -EINVAL;
610 			goto err_flags;
611 		}
612 		/* It was only kept for port vlans, now make it real */
613 		err = br_fdb_insert(br, NULL, br->dev->dev_addr,
614 				    vlan->vid);
615 		if (err) {
616 			br_err(br, "failed to insert local address into bridge forwarding table\n");
617 			goto err_fdb_insert;
618 		}
619 
620 		refcount_inc(&vlan->refcnt);
621 		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
622 		vg->num_vlans++;
623 		*changed = true;
624 	}
625 
626 	if (__vlan_add_flags(vlan, flags))
627 		*changed = true;
628 
629 	return 0;
630 
631 err_fdb_insert:
632 err_flags:
633 	br_switchdev_port_vlan_del(br->dev, vlan->vid);
634 	return err;
635 }
636 
637 /* Must be protected by RTNL.
638  * Must be called with vid in range from 1 to 4094 inclusive.
639  * changed must be true only if the vlan was created or updated
640  */
641 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
642 		struct netlink_ext_ack *extack)
643 {
644 	struct net_bridge_vlan_group *vg;
645 	struct net_bridge_vlan *vlan;
646 	int ret;
647 
648 	ASSERT_RTNL();
649 
650 	*changed = false;
651 	vg = br_vlan_group(br);
652 	vlan = br_vlan_find(vg, vid);
653 	if (vlan)
654 		return br_vlan_add_existing(br, vg, vlan, flags, changed,
655 					    extack);
656 
657 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
658 	if (!vlan)
659 		return -ENOMEM;
660 
661 	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
662 	if (!vlan->stats) {
663 		kfree(vlan);
664 		return -ENOMEM;
665 	}
666 	vlan->vid = vid;
667 	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
668 	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
669 	vlan->br = br;
670 	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
671 		refcount_set(&vlan->refcnt, 1);
672 	ret = __vlan_add(vlan, flags, extack);
673 	if (ret) {
674 		free_percpu(vlan->stats);
675 		kfree(vlan);
676 	} else {
677 		*changed = true;
678 	}
679 
680 	return ret;
681 }
682 
683 /* Must be protected by RTNL.
684  * Must be called with vid in range from 1 to 4094 inclusive.
685  */
686 int br_vlan_delete(struct net_bridge *br, u16 vid)
687 {
688 	struct net_bridge_vlan_group *vg;
689 	struct net_bridge_vlan *v;
690 
691 	ASSERT_RTNL();
692 
693 	vg = br_vlan_group(br);
694 	v = br_vlan_find(vg, vid);
695 	if (!v || !br_vlan_is_brentry(v))
696 		return -ENOENT;
697 
698 	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
699 	br_fdb_delete_by_port(br, NULL, vid, 0);
700 
701 	vlan_tunnel_info_del(vg, v);
702 
703 	return __vlan_del(v);
704 }
705 
706 void br_vlan_flush(struct net_bridge *br)
707 {
708 	struct net_bridge_vlan_group *vg;
709 
710 	ASSERT_RTNL();
711 
712 	vg = br_vlan_group(br);
713 	__vlan_flush(vg);
714 	RCU_INIT_POINTER(br->vlgrp, NULL);
715 	synchronize_rcu();
716 	__vlan_group_free(vg);
717 }
718 
719 struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
720 {
721 	if (!vg)
722 		return NULL;
723 
724 	return br_vlan_lookup(&vg->vlan_hash, vid);
725 }
726 
727 /* Must be protected by RTNL. */
728 static void recalculate_group_addr(struct net_bridge *br)
729 {
730 	if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
731 		return;
732 
733 	spin_lock_bh(&br->lock);
734 	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
735 	    br->vlan_proto == htons(ETH_P_8021Q)) {
736 		/* Bridge Group Address */
737 		br->group_addr[5] = 0x00;
738 	} else { /* vlan_enabled && ETH_P_8021AD */
739 		/* Provider Bridge Group Address */
740 		br->group_addr[5] = 0x08;
741 	}
742 	spin_unlock_bh(&br->lock);
743 }
744 
745 /* Must be protected by RTNL. */
746 void br_recalculate_fwd_mask(struct net_bridge *br)
747 {
748 	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
749 	    br->vlan_proto == htons(ETH_P_8021Q))
750 		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
751 	else /* vlan_enabled && ETH_P_8021AD */
752 		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
753 					      ~(1u << br->group_addr[5]);
754 }
755 
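/* Toggle vlan filtering on the bridge: propagate the new state to switchdev
 * and recompute port promiscuity, the bridge group address and the group
 * forwarding mask.
 */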
756 int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
757 {
758 	struct switchdev_attr attr = {
759 		.orig_dev = br->dev,
760 		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
761 		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
762 		.u.vlan_filtering = val,
763 	};
764 	int err;
765 
766 	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
767 		return 0;
768 
769 	err = switchdev_port_attr_set(br->dev, &attr);
770 	if (err && err != -EOPNOTSUPP)
771 		return err;
772 
773 	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
774 	br_manage_promisc(br);
775 	recalculate_group_addr(br);
776 	br_recalculate_fwd_mask(br);
777 
778 	return 0;
779 }
780 
781 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
782 {
783 	return __br_vlan_filter_toggle(br, val);
784 }
785 
786 bool br_vlan_enabled(const struct net_device *dev)
787 {
788 	struct net_bridge *br = netdev_priv(dev);
789 
790 	return br_opt_get(br, BROPT_VLAN_ENABLED);
791 }
792 EXPORT_SYMBOL_GPL(br_vlan_enabled);
793 
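/* Switch the bridge vlan protocol (802.1Q <-> 802.1ad): first add every
 * existing port vlan to the device filters under the new protocol, then flip
 * the protocol and remove the old-protocol filter entries; unwind on failure.
 */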
794 int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
795 {
796 	int err = 0;
797 	struct net_bridge_port *p;
798 	struct net_bridge_vlan *vlan;
799 	struct net_bridge_vlan_group *vg;
800 	__be16 oldproto;
801 
802 	if (br->vlan_proto == proto)
803 		return 0;
804 
805 	/* Add VLANs for the new proto to the device filter. */
806 	list_for_each_entry(p, &br->port_list, list) {
807 		vg = nbp_vlan_group(p);
808 		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
809 			err = vlan_vid_add(p->dev, proto, vlan->vid);
810 			if (err)
811 				goto err_filt;
812 		}
813 	}
814 
815 	oldproto = br->vlan_proto;
816 	br->vlan_proto = proto;
817 
818 	recalculate_group_addr(br);
819 	br_recalculate_fwd_mask(br);
820 
821 	/* Delete VLANs for the old proto from the device filter. */
822 	list_for_each_entry(p, &br->port_list, list) {
823 		vg = nbp_vlan_group(p);
824 		list_for_each_entry(vlan, &vg->vlan_list, vlist)
825 			vlan_vid_del(p->dev, oldproto, vlan->vid);
826 	}
827 
828 	return 0;
829 
830 err_filt:
831 	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
832 		vlan_vid_del(p->dev, proto, vlan->vid);
833 
834 	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
835 		vg = nbp_vlan_group(p);
836 		list_for_each_entry(vlan, &vg->vlan_list, vlist)
837 			vlan_vid_del(p->dev, proto, vlan->vid);
838 	}
839 
840 	return err;
841 }
842 
843 int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
844 {
845 	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
846 		return -EPROTONOSUPPORT;
847 
848 	return __br_vlan_set_proto(br, htons(val));
849 }
850 
851 int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
852 {
853 	switch (val) {
854 	case 0:
855 	case 1:
856 		br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
857 		break;
858 	default:
859 		return -EINVAL;
860 	}
861 
862 	return 0;
863 }
864 
865 int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
866 {
867 	struct net_bridge_port *p;
868 
869 	/* only allow changing the option if there are no port vlans configured */
870 	list_for_each_entry(p, &br->port_list, list) {
871 		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
872 
873 		if (vg->num_vlans)
874 			return -EBUSY;
875 	}
876 
877 	switch (val) {
878 	case 0:
879 	case 1:
880 		br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
881 		break;
882 	default:
883 		return -EINVAL;
884 	}
885 
886 	return 0;
887 }
888 
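/* True if @vid is the group's current pvid and its entry still looks like an
 * auto-installed default pvid (usable and untagged), i.e. not user-modified.
 */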
889 static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
890 {
891 	struct net_bridge_vlan *v;
892 
893 	if (vid != vg->pvid)
894 		return false;
895 
896 	v = br_vlan_lookup(&vg->vlan_hash, vid);
897 	if (v && br_vlan_should_use(v) &&
898 	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
899 		return true;
900 
901 	return false;
902 }
903 
904 static void br_vlan_disable_default_pvid(struct net_bridge *br)
905 {
906 	struct net_bridge_port *p;
907 	u16 pvid = br->default_pvid;
908 
909 	/* Disable default_pvid on all ports where it is still
910 	 * configured.
911 	 */
912 	if (vlan_default_pvid(br_vlan_group(br), pvid))
913 		br_vlan_delete(br, pvid);
914 
915 	list_for_each_entry(p, &br->port_list, list) {
916 		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
917 			nbp_vlan_delete(p, pvid);
918 	}
919 
920 	br->default_pvid = 0;
921 }
922 
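/* Move default_pvid to @pvid on the bridge and on every port, skipping
 * entries whose configuration no longer matches the old default (the user
 * changed them); on failure roll back whatever was already updated.
 */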
923 int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
924 			       struct netlink_ext_ack *extack)
925 {
926 	const struct net_bridge_vlan *pvent;
927 	struct net_bridge_vlan_group *vg;
928 	struct net_bridge_port *p;
929 	unsigned long *changed;
930 	bool vlchange;
931 	u16 old_pvid;
932 	int err = 0;
933 
934 	if (!pvid) {
935 		br_vlan_disable_default_pvid(br);
936 		return 0;
937 	}
938 
939 	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
940 	if (!changed)
941 		return -ENOMEM;
942 
943 	old_pvid = br->default_pvid;
944 
945 	/* Update default_pvid config only if we do not conflict with
946 	 * user configuration.
947 	 */
948 	vg = br_vlan_group(br);
949 	pvent = br_vlan_find(vg, pvid);
950 	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
951 	    (!pvent || !br_vlan_should_use(pvent))) {
952 		err = br_vlan_add(br, pvid,
953 				  BRIDGE_VLAN_INFO_PVID |
954 				  BRIDGE_VLAN_INFO_UNTAGGED |
955 				  BRIDGE_VLAN_INFO_BRENTRY,
956 				  &vlchange, extack);
957 		if (err)
958 			goto out;
959 		br_vlan_delete(br, old_pvid);
960 		set_bit(0, changed);
961 	}
962 
963 	list_for_each_entry(p, &br->port_list, list) {
964 		/* Update default_pvid config only if we do not conflict with
965 		 * user configuration.
966 		 */
967 		vg = nbp_vlan_group(p);
968 		if ((old_pvid &&
969 		     !vlan_default_pvid(vg, old_pvid)) ||
970 		    br_vlan_find(vg, pvid))
971 			continue;
972 
973 		err = nbp_vlan_add(p, pvid,
974 				   BRIDGE_VLAN_INFO_PVID |
975 				   BRIDGE_VLAN_INFO_UNTAGGED,
976 				   &vlchange, extack);
977 		if (err)
978 			goto err_port;
979 		nbp_vlan_delete(p, old_pvid);
980 		set_bit(p->port_no, changed);
981 	}
982 
983 	br->default_pvid = pvid;
984 
985 out:
986 	bitmap_free(changed);
987 	return err;
988 
989 err_port:
990 	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
991 		if (!test_bit(p->port_no, changed))
992 			continue;
993 
994 		if (old_pvid)
995 			nbp_vlan_add(p, old_pvid,
996 				     BRIDGE_VLAN_INFO_PVID |
997 				     BRIDGE_VLAN_INFO_UNTAGGED,
998 				     &vlchange, NULL);
999 		nbp_vlan_delete(p, pvid);
1000 	}
1001 
1002 	if (test_bit(0, changed)) {
1003 		if (old_pvid)
1004 			br_vlan_add(br, old_pvid,
1005 				    BRIDGE_VLAN_INFO_PVID |
1006 				    BRIDGE_VLAN_INFO_UNTAGGED |
1007 				    BRIDGE_VLAN_INFO_BRENTRY,
1008 				    &vlchange, NULL);
1009 		br_vlan_delete(br, pvid);
1010 	}
1011 	goto out;
1012 }
1013 
1014 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
1015 {
1016 	u16 pvid = val;
1017 	int err = 0;
1018 
1019 	if (val >= VLAN_VID_MASK)
1020 		return -EINVAL;
1021 
1022 	if (pvid == br->default_pvid)
1023 		goto out;
1024 
1025 	/* Only allow default pvid change when filtering is disabled */
1026 	if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
1027 		pr_info_once("Please disable vlan filtering to change default_pvid\n");
1028 		err = -EPERM;
1029 		goto out;
1030 	}
1031 	err = __br_vlan_set_default_pvid(br, pvid, NULL);
1032 out:
1033 	return err;
1034 }
1035 
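/* Bridge-level vlan init: allocate the vlan group, default to 802.1Q with
 * default_pvid 1 and install the initial VLAN 1 bridge entry.
 */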
1036 int br_vlan_init(struct net_bridge *br)
1037 {
1038 	struct net_bridge_vlan_group *vg;
1039 	int ret = -ENOMEM;
1040 	bool changed;
1041 
1042 	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
1043 	if (!vg)
1044 		goto out;
1045 	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1046 	if (ret)
1047 		goto err_rhtbl;
1048 	ret = vlan_tunnel_init(vg);
1049 	if (ret)
1050 		goto err_tunnel_init;
1051 	INIT_LIST_HEAD(&vg->vlan_list);
1052 	br->vlan_proto = htons(ETH_P_8021Q);
1053 	br->default_pvid = 1;
1054 	rcu_assign_pointer(br->vlgrp, vg);
1055 	ret = br_vlan_add(br, 1,
1056 			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
1057 			  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1058 	if (ret)
1059 		goto err_vlan_add;
1060 
1061 out:
1062 	return ret;
1063 
1064 err_vlan_add:
1065 	vlan_tunnel_deinit(vg);
1066 err_tunnel_init:
1067 	rhashtable_destroy(&vg->vlan_hash);
1068 err_rhtbl:
1069 	kfree(vg);
1070 
1071 	goto out;
1072 }
1073 
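/* Per-port vlan init: allocate the port's vlan group, sync the bridge's
 * current filtering state to switchdev and add the bridge's default_pvid.
 */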
1074 int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
1075 {
1076 	struct switchdev_attr attr = {
1077 		.orig_dev = p->br->dev,
1078 		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
1079 		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
1080 		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
1081 	};
1082 	struct net_bridge_vlan_group *vg;
1083 	int ret = -ENOMEM;
1084 
1085 	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
1086 	if (!vg)
1087 		goto out;
1088 
1089 	ret = switchdev_port_attr_set(p->dev, &attr);
1090 	if (ret && ret != -EOPNOTSUPP)
1091 		goto err_vlan_enabled;
1092 
1093 	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1094 	if (ret)
1095 		goto err_rhtbl;
1096 	ret = vlan_tunnel_init(vg);
1097 	if (ret)
1098 		goto err_tunnel_init;
1099 	INIT_LIST_HEAD(&vg->vlan_list);
1100 	rcu_assign_pointer(p->vlgrp, vg);
1101 	if (p->br->default_pvid) {
1102 		bool changed;
1103 
1104 		ret = nbp_vlan_add(p, p->br->default_pvid,
1105 				   BRIDGE_VLAN_INFO_PVID |
1106 				   BRIDGE_VLAN_INFO_UNTAGGED,
1107 				   &changed, extack);
1108 		if (ret)
1109 			goto err_vlan_add;
1110 	}
1111 out:
1112 	return ret;
1113 
1114 err_vlan_add:
1115 	RCU_INIT_POINTER(p->vlgrp, NULL);
1116 	synchronize_rcu();
1117 	vlan_tunnel_deinit(vg);
1118 err_tunnel_init:
1119 	rhashtable_destroy(&vg->vlan_hash);
1120 err_rhtbl:
1121 err_vlan_enabled:
1122 	kfree(vg);
1123 
1124 	goto out;
1125 }
1126 
1127 /* Must be protected by RTNL.
1128  * Must be called with vid in range from 1 to 4094 inclusive.
1129  * changed must be true only if the vlan was created or updated
1130  */
1131 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
1132 		 bool *changed, struct netlink_ext_ack *extack)
1133 {
1134 	struct net_bridge_vlan *vlan;
1135 	int ret;
1136 
1137 	ASSERT_RTNL();
1138 
1139 	*changed = false;
1140 	vlan = br_vlan_find(nbp_vlan_group(port), vid);
1141 	if (vlan) {
1142 		/* Pass the flags to the hardware bridge */
1143 		ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
1144 		if (ret && ret != -EOPNOTSUPP)
1145 			return ret;
1146 		*changed = __vlan_add_flags(vlan, flags);
1147 
1148 		return 0;
1149 	}
1150 
1151 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1152 	if (!vlan)
1153 		return -ENOMEM;
1154 
1155 	vlan->vid = vid;
1156 	vlan->port = port;
1157 	ret = __vlan_add(vlan, flags, extack);
1158 	if (ret)
1159 		kfree(vlan);
1160 	else
1161 		*changed = true;
1162 
1163 	return ret;
1164 }
1165 
1166 /* Must be protected by RTNL.
1167  * Must be called with vid in range from 1 to 4094 inclusive.
1168  */
1169 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
1170 {
1171 	struct net_bridge_vlan *v;
1172 
1173 	ASSERT_RTNL();
1174 
1175 	v = br_vlan_find(nbp_vlan_group(port), vid);
1176 	if (!v)
1177 		return -ENOENT;
1178 	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1179 	br_fdb_delete_by_port(port->br, port, vid, 0);
1180 
1181 	return __vlan_del(v);
1182 }
1183 
1184 void nbp_vlan_flush(struct net_bridge_port *port)
1185 {
1186 	struct net_bridge_vlan_group *vg;
1187 
1188 	ASSERT_RTNL();
1189 
1190 	vg = nbp_vlan_group(port);
1191 	__vlan_flush(vg);
1192 	RCU_INIT_POINTER(port->vlgrp, NULL);
1193 	synchronize_rcu();
1194 	__vlan_group_free(vg);
1195 }
1196 
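/* Sum the per-cpu counters of @v into @stats using the u64 stats syncp */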
1197 void br_vlan_get_stats(const struct net_bridge_vlan *v,
1198 		       struct br_vlan_stats *stats)
1199 {
1200 	int i;
1201 
1202 	memset(stats, 0, sizeof(*stats));
1203 	for_each_possible_cpu(i) {
1204 		u64 rxpackets, rxbytes, txpackets, txbytes;
1205 		struct br_vlan_stats *cpu_stats;
1206 		unsigned int start;
1207 
1208 		cpu_stats = per_cpu_ptr(v->stats, i);
1209 		do {
1210 			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1211 			rxpackets = cpu_stats->rx_packets;
1212 			rxbytes = cpu_stats->rx_bytes;
1213 			txbytes = cpu_stats->tx_bytes;
1214 			txpackets = cpu_stats->tx_packets;
1215 		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1216 
1217 		stats->rx_packets += rxpackets;
1218 		stats->rx_bytes += rxbytes;
1219 		stats->tx_bytes += txbytes;
1220 		stats->tx_packets += txpackets;
1221 	}
1222 }
1223 
1224 int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
1225 {
1226 	struct net_bridge_vlan_group *vg;
1227 	struct net_bridge_port *p;
1228 
1229 	ASSERT_RTNL();
1230 	p = br_port_get_check_rtnl(dev);
1231 	if (p)
1232 		vg = nbp_vlan_group(p);
1233 	else if (netif_is_bridge_master(dev))
1234 		vg = br_vlan_group(netdev_priv(dev));
1235 	else
1236 		return -EINVAL;
1237 
1238 	*p_pvid = br_get_pvid(vg);
1239 	return 0;
1240 }
1241 EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
1242 
1243 int br_vlan_get_info(const struct net_device *dev, u16 vid,
1244 		     struct bridge_vlan_info *p_vinfo)
1245 {
1246 	struct net_bridge_vlan_group *vg;
1247 	struct net_bridge_vlan *v;
1248 	struct net_bridge_port *p;
1249 
1250 	ASSERT_RTNL();
1251 	p = br_port_get_check_rtnl(dev);
1252 	if (p)
1253 		vg = nbp_vlan_group(p);
1254 	else if (netif_is_bridge_master(dev))
1255 		vg = br_vlan_group(netdev_priv(dev));
1256 	else
1257 		return -EINVAL;
1258 
1259 	v = br_vlan_find(vg, vid);
1260 	if (!v)
1261 		return -ENOENT;
1262 
1263 	p_vinfo->vid = vid;
1264 	p_vinfo->flags = v->flags;
1265 	return 0;
1266 }
1267 EXPORT_SYMBOL_GPL(br_vlan_get_info);
1268