xref: /openbmc/linux/net/bridge/br_vlan.c (revision 1fa0a7dc)
1 #include <linux/kernel.h>
2 #include <linux/netdevice.h>
3 #include <linux/rtnetlink.h>
4 #include <linux/slab.h>
5 #include <net/switchdev.h>
6 
7 #include "br_private.h"
8 #include "br_private_tunnel.h"
9 
10 static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
11 
12 static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
13 			      const void *ptr)
14 {
15 	const struct net_bridge_vlan *vle = ptr;
16 	u16 vid = *(u16 *)arg->key;
17 
18 	return vle->vid != vid;
19 }
20 
/* Parameters for the per-bridge and per-port VLAN hash tables.
 * Entries are struct net_bridge_vlan, keyed by the 16-bit VLAN id.
 */
static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.max_size = VLAN_N_VID,	/* at most one entry per possible VLAN id */
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};
30 
/* Look up a VLAN entry by id in @tbl; returns NULL if not present. */
static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}
35 
/* Make @vid the group's PVID.
 * Returns false if @vid already was the PVID (nothing changed).
 */
static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid == vid)
		return false;

	/* Order prior stores before publishing the new pvid to lockless
	 * readers. NOTE(review): presumably pairs with a read barrier on
	 * the pvid reader side — confirm against br_get_pvid().
	 */
	smp_wmb();
	vg->pvid = vid;

	return true;
}
46 
/* Clear the group's PVID, but only if it currently is @vid.
 * Returns false if @vid was not the PVID (nothing changed).
 */
static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return false;

	/* Same publication ordering as __vlan_add_pvid(). */
	smp_wmb();
	vg->pvid = 0;

	return true;
}
57 
/* Apply the PVID and UNTAGGED bits from @flags to vlan entry @v and its
 * group. Return true if anything changed, false otherwise.
 */
static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan_group *vg;
	u16 old_flags = v->flags;
	bool ret;

	/* Master (bridge) vlans live in the bridge's group, port vlans in
	 * their port's group.
	 */
	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	/* PVID is a group-level property: requesting it moves it to this
	 * vid, omitting it clears it only if this vid currently holds it.
	 */
	if (flags & BRIDGE_VLAN_INFO_PVID)
		ret = __vlan_add_pvid(vg, v->vid);
	else
		ret = __vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;

	return ret || !!(old_flags ^ v->flags);
}
82 
/* Install @v's vid into the port device's VLAN filter.
 * Tries the switchdev offload path first; marks the entry so the matching
 * delete path knows which mechanism to undo.
 */
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  struct net_bridge_vlan *v, u16 flags,
			  struct netlink_ext_ack *extack)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q add.
	 */
	err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, v->vid);
	/* Remember the add went through switchdev (even on error, matching
	 * the original behavior) so __vlan_vid_del() skips the 8021q del.
	 */
	v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
	return err;
}
98 
/* Insert @v into its group's vlan_list, keeping the list sorted by vid in
 * ascending order. Walks backwards from the tail and inserts after the
 * first entry whose vid is <= the new vid (RCU-safe insertion).
 */
static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;
	}
	list_add_rcu(&v->vlist, hpos);
}
120 
/* Unlink @v from its group's sorted vlan_list (RCU-safe removal; the
 * entry itself is freed later via call_rcu by the callers).
 */
static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}
125 
/* Remove @v's vid from the port device's VLAN filter, undoing whichever
 * mechanism __vlan_vid_add() used (switchdev or 8021q).
 */
static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  const struct net_bridge_vlan *v)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q del.
	 */
	err = br_switchdev_port_vlan_del(dev, v->vid);
	if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
		vlan_vid_del(dev, br->vlan_proto, v->vid);
	/* -EOPNOTSUPP just means no switchdev offload — not a failure. */
	return err == -EOPNOTSUPP ? 0 : err;
}
139 
/* Returns a master vlan, if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *
br_vlan_get_master(struct net_bridge *br, u16 vid,
		   struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		bool changed;

		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0, &changed, extack))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
		/* Freshly created non-brentry master: this caller holds the
		 * first and only reference.
		 */
		refcount_set(&masterv->refcnt, 1);
		return masterv;
	}
	refcount_inc(&masterv->refcnt);

	return masterv;
}
168 
/* RCU callback freeing a master (bridge) vlan entry after the grace
 * period; the master owns its per-cpu stats, so free them here too.
 */
static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}
179 
/* Drop one reference on a master vlan; on the last put, unlink it from
 * hash and list and defer freeing to an RCU grace period. No-op for
 * non-master entries.
 */
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (refcount_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}
195 
/* RCU callback freeing a per-port vlan entry after the grace period.
 * Port vlans normally share the master's stats; only free the stats when
 * this entry allocated its own (per-port stats mode).
 */
static void nbp_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(br_vlan_is_master(v));
	/* if we had per-port stats configured then free them here */
	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
		free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}
208 
209 /* This is the shared VLAN add function which works for both ports and bridge
210  * devices. There are four possible calls to this function in terms of the
211  * vlan entry type:
212  * 1. vlan is being added on a port (no master flags, global entry exists)
213  * 2. vlan is being added on a bridge (both master and brentry flags)
214  * 3. vlan is being added on a port, but a global entry didn't exist which
215  *    is being created right now (master flag set, brentry flag unset), the
216  *    global entry is used for global per-vlan features, but not for filtering
217  * 4. same as 3 but with both master and brentry flags set so the entry
218  *    will be used for filtering in both the port and the bridge
219  */
220 static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
221 		      struct netlink_ext_ack *extack)
222 {
223 	struct net_bridge_vlan *masterv = NULL;
224 	struct net_bridge_port *p = NULL;
225 	struct net_bridge_vlan_group *vg;
226 	struct net_device *dev;
227 	struct net_bridge *br;
228 	int err;
229 
230 	if (br_vlan_is_master(v)) {
231 		br = v->br;
232 		dev = br->dev;
233 		vg = br_vlan_group(br);
234 	} else {
235 		p = v->port;
236 		br = p->br;
237 		dev = p->dev;
238 		vg = nbp_vlan_group(p);
239 	}
240 
241 	if (p) {
242 		/* Add VLAN to the device filter if it is supported.
243 		 * This ensures tagged traffic enters the bridge when
244 		 * promiscuous mode is disabled by br_manage_promisc().
245 		 */
246 		err = __vlan_vid_add(dev, br, v, flags, extack);
247 		if (err)
248 			goto out;
249 
250 		/* need to work on the master vlan too */
251 		if (flags & BRIDGE_VLAN_INFO_MASTER) {
252 			bool changed;
253 
254 			err = br_vlan_add(br, v->vid,
255 					  flags | BRIDGE_VLAN_INFO_BRENTRY,
256 					  &changed, extack);
257 			if (err)
258 				goto out_filt;
259 		}
260 
261 		masterv = br_vlan_get_master(br, v->vid, extack);
262 		if (!masterv)
263 			goto out_filt;
264 		v->brvlan = masterv;
265 		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
266 			v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
267 			if (!v->stats) {
268 				err = -ENOMEM;
269 				goto out_filt;
270 			}
271 			v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
272 		} else {
273 			v->stats = masterv->stats;
274 		}
275 	} else {
276 		err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
277 		if (err && err != -EOPNOTSUPP)
278 			goto out;
279 	}
280 
281 	/* Add the dev mac and count the vlan only if it's usable */
282 	if (br_vlan_should_use(v)) {
283 		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
284 		if (err) {
285 			br_err(br, "failed insert local address into bridge forwarding table\n");
286 			goto out_filt;
287 		}
288 		vg->num_vlans++;
289 	}
290 
291 	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
292 					    br_vlan_rht_params);
293 	if (err)
294 		goto out_fdb_insert;
295 
296 	__vlan_add_list(v);
297 	__vlan_add_flags(v, flags);
298 
299 	if (p)
300 		nbp_vlan_set_vlan_dev_state(p, v->vid);
301 out:
302 	return err;
303 
304 out_fdb_insert:
305 	if (br_vlan_should_use(v)) {
306 		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
307 		vg->num_vlans--;
308 	}
309 
310 out_filt:
311 	if (p) {
312 		__vlan_vid_del(dev, br, v);
313 		if (masterv) {
314 			if (v->stats && masterv->stats != v->stats)
315 				free_percpu(v->stats);
316 			v->stats = NULL;
317 
318 			br_vlan_put_master(masterv);
319 			v->brvlan = NULL;
320 		}
321 	} else {
322 		br_switchdev_port_vlan_del(dev, v->vid);
323 	}
324 
325 	goto out;
326 }
327 
/* Shared VLAN delete for both port and bridge entries: removes the device
 * filter / switchdev state, adjusts accounting, unlinks port entries and
 * drops the reference this entry held on its master vlan.
 */
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	/* Clear the group PVID first if this vid holds it. */
	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v);
		if (err)
			goto out;
	} else {
		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
		if (err && err != -EOPNOTSUPP)
			goto out;
		err = 0;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	/* Port entries (masterv != v) are removed and freed here; a master
	 * entry is only freed when its last reference is dropped below.
	 */
	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		nbp_vlan_set_vlan_dev_state(p, v->vid);
		call_rcu(&v->rcu, nbp_vlan_rcu_free);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}
373 
/* Final teardown of a vlan group; callers must have emptied the list and
 * waited for an RCU grace period before calling.
 */
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}
381 
/* Delete every vlan in the group, clearing the PVID first so no entry is
 * still referenced as PVID while being torn down.
 */
static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vg, vg->pvid);
	/* _safe iteration: __vlan_del() unlinks entries as we go */
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
		__vlan_del(vlan);
}
390 
/* Egress VLAN handling: untag frames whose vlan has the UNTAGGED flag,
 * update tx stats and apply egress tunnel mapping. Returns the (possibly
 * modified) skb, or NULL if it was dropped/consumed.
 */
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point.  The
	 * only exception is the bridge is set in promisc mode and the
	 * packet is destined for the bridge device.  In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		__vlan_hwaccel_clear_tag(skb);

	/* Egress tunnel mapping may consume the skb on failure. */
	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}
442 
/* Called under RCU */
/* Ingress VLAN filtering core: normalizes the frame's tag, resolves the
 * effective vid (using the PVID for untagged/priority-tagged frames),
 * verifies the vlan is configured on this group and updates rx stats.
 * Returns true to accept the frame, false if it was dropped (the skb is
 * freed on drop).
 */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			/* Foreign-proto tag was pushed back into the payload;
			 * treat the frame as untagged from here on.
			 */
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, we know that skb->vlan_tci VID
			 * field was 0.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if stats are disabled we can avoid the lookup */
		if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED))
			return true;
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	return true;

drop:
	kfree_skb(skb);
	return false;
}
533 
534 bool br_allowed_ingress(const struct net_bridge *br,
535 			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
536 			u16 *vid)
537 {
538 	/* If VLAN filtering is disabled on the bridge, all packets are
539 	 * permitted.
540 	 */
541 	if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
542 		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
543 		return true;
544 	}
545 
546 	return __allowed_ingress(br, vg, skb, vid);
547 }
548 
549 /* Called under RCU. */
550 bool br_allowed_egress(struct net_bridge_vlan_group *vg,
551 		       const struct sk_buff *skb)
552 {
553 	const struct net_bridge_vlan *v;
554 	u16 vid;
555 
556 	/* If this packet was not filtered at input, let it pass */
557 	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
558 		return true;
559 
560 	br_vlan_get_tag(skb, &vid);
561 	v = br_vlan_find(vg, vid);
562 	if (v && br_vlan_should_use(v))
563 		return true;
564 
565 	return false;
566 }
567 
568 /* Called under RCU */
569 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
570 {
571 	struct net_bridge_vlan_group *vg;
572 	struct net_bridge *br = p->br;
573 
574 	/* If filtering was disabled at input, let it pass. */
575 	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
576 		return true;
577 
578 	vg = nbp_vlan_group_rcu(p);
579 	if (!vg || !vg->num_vlans)
580 		return false;
581 
582 	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
583 		*vid = 0;
584 
585 	if (!*vid) {
586 		*vid = br_get_pvid(vg);
587 		if (!*vid)
588 			return false;
589 
590 		return true;
591 	}
592 
593 	if (br_vlan_find(vg, *vid))
594 		return true;
595 
596 	return false;
597 }
598 
/* Update an already-existing bridge vlan entry: refresh switchdev state,
 * promote a port-only master entry to a real bridge entry when BRENTRY is
 * requested, and apply the new flags. *@changed is set when anything was
 * created or modified.
 */
static int br_vlan_add_existing(struct net_bridge *br,
				struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *vlan,
				u16 flags, bool *changed,
				struct netlink_ext_ack *extack)
{
	int err;

	err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	if (!br_vlan_is_brentry(vlan)) {
		/* Trying to change flags of non-existent bridge vlan */
		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
			err = -EINVAL;
			goto err_flags;
		}
		/* It was only kept for port vlans, now make it real */
		err = br_fdb_insert(br, NULL, br->dev->dev_addr,
				    vlan->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto err_fdb_insert;
		}

		/* The bridge itself now also holds a reference. */
		refcount_inc(&vlan->refcnt);
		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans++;
		*changed = true;
	}

	if (__vlan_add_flags(vlan, flags))
		*changed = true;

	return 0;

err_fdb_insert:
err_flags:
	br_switchdev_port_vlan_del(br->dev, vlan->vid);
	return err;
}
641 
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
		struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	/* Existing entry: only flags/state may need updating. */
	if (vlan)
		return br_vlan_add_existing(br, vg, vlan, flags, changed,
					    extack);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	/* Master entries always own per-cpu stats (ports may share them). */
	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	/* PVID is tracked in the group, not as an entry flag. */
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		refcount_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags, extack);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	} else {
		*changed = true;
	}

	return ret;
}
687 
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Deletes the bridge's own vlan entry for @vid; returns -ENOENT if there
 * is no real bridge entry for it.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	/* Purge fdb state tied to this vid before tearing the entry down. */
	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	vlan_tunnel_info_del(vg, v);

	return __vlan_del(v);
}
710 
/* Remove all bridge vlans and free the bridge's vlan group.
 * Unpublishes the group pointer and waits a full RCU grace period before
 * freeing, so lockless readers can never see freed memory.
 */
void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
723 
724 struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
725 {
726 	if (!vg)
727 		return NULL;
728 
729 	return br_vlan_lookup(&vg->vlan_hash, vid);
730 }
731 
/* Must be protected by RTNL. */
/* Pick the bridge group address byte based on the vlan protocol, unless
 * the user explicitly configured a group address.
 */
static void recalculate_group_addr(struct net_bridge *br)
{
	/* User-set address takes precedence — never override it. */
	if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
		return;

	spin_lock_bh(&br->lock);
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}
749 
750 /* Must be protected by RTNL. */
751 void br_recalculate_fwd_mask(struct net_bridge *br)
752 {
753 	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
754 	    br->vlan_proto == htons(ETH_P_8021Q))
755 		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
756 	else /* vlan_enabled && ETH_P_8021AD */
757 		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
758 					      ~(1u << br->group_addr[5]);
759 }
760 
/* Enable/disable VLAN filtering on the bridge, propagating the change to
 * switchdev hardware and recomputing promiscuity, group address and
 * forwarding mask. No-op when the option already matches @val.
 */
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}
785 
/* Sysfs/netlink entry point; thin wrapper around the toggle core. */
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	return __br_vlan_filter_toggle(br, val);
}
790 
/* Exported helper: report whether VLAN filtering is enabled on a bridge
 * device (@dev must be a bridge master).
 */
bool br_vlan_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_opt_get(br, BROPT_VLAN_ENABLED);
}
EXPORT_SYMBOL_GPL(br_vlan_enabled);
798 
/* Switch the bridge's VLAN protocol (e.g. 802.1Q <-> 802.1AD).
 * Installs every configured vid under the new protocol on all ports
 * before switching, then removes the old-protocol filters; on any failure
 * the partially-installed new filters are rolled back.
 */
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	/* Undo the partial add on the failing port, then every fully
	 * processed earlier port (p and vlan still point at the failure).
	 */
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}
847 
848 int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
849 {
850 	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
851 		return -EPROTONOSUPPORT;
852 
853 	return __br_vlan_set_proto(br, htons(val));
854 }
855 
856 int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
857 {
858 	switch (val) {
859 	case 0:
860 	case 1:
861 		br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
862 		break;
863 	default:
864 		return -EINVAL;
865 	}
866 
867 	return 0;
868 }
869 
870 int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
871 {
872 	struct net_bridge_port *p;
873 
874 	/* allow to change the option if there are no port vlans configured */
875 	list_for_each_entry(p, &br->port_list, list) {
876 		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
877 
878 		if (vg->num_vlans)
879 			return -EBUSY;
880 	}
881 
882 	switch (val) {
883 	case 0:
884 	case 1:
885 		br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
886 		break;
887 	default:
888 		return -EINVAL;
889 	}
890 
891 	return 0;
892 }
893 
894 static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
895 {
896 	struct net_bridge_vlan *v;
897 
898 	if (vid != vg->pvid)
899 		return false;
900 
901 	v = br_vlan_lookup(&vg->vlan_hash, vid);
902 	if (v && br_vlan_should_use(v) &&
903 	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
904 		return true;
905 
906 	return false;
907 }
908 
/* Turn the automatic default PVID off: remove the default-pvid vlan from
 * the bridge and from every port where it is still in its default
 * (pvid+untagged) configuration, leaving user-modified entries alone.
 */
static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}
927 
/* Change the bridge's default PVID to @pvid (0 disables it), migrating
 * the bridge entry and every port that is still in the default
 * configuration. On a mid-way failure, all already-migrated entries are
 * rolled back to @old_pvid using the @changed bitmap (bit 0 = the bridge
 * itself, other bits = port numbers).
 */
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
			       struct netlink_ext_ack *extack)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	unsigned long *changed;
	bool vlchange;
	u16 old_pvid;
	int err = 0;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY,
				  &vlchange, extack);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);	/* bit 0 tracks the bridge entry */
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &vlchange, extack);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	bitmap_free(changed);
	return err;

err_port:
	/* Roll back every port migrated before the failure. */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED,
				     &vlchange, NULL);
		nbp_vlan_delete(p, pvid);
	}

	/* And the bridge entry itself, if it was migrated. */
	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY,
				    &vlchange, NULL);
		br_vlan_delete(br, pvid);
	}
	goto out;
}
1018 
1019 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
1020 {
1021 	u16 pvid = val;
1022 	int err = 0;
1023 
1024 	if (val >= VLAN_VID_MASK)
1025 		return -EINVAL;
1026 
1027 	if (pvid == br->default_pvid)
1028 		goto out;
1029 
1030 	/* Only allow default pvid change when filtering is disabled */
1031 	if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
1032 		pr_info_once("Please disable vlan filtering to change default_pvid\n");
1033 		err = -EPERM;
1034 		goto out;
1035 	}
1036 	err = __br_vlan_set_default_pvid(br, pvid, NULL);
1037 out:
1038 	return err;
1039 }
1040 
/* Initialize the bridge's vlan group: hash table, tunnel state, defaults
 * (802.1Q protocol, default_pvid 1) and the initial vlan 1 bridge entry.
 * All partially-initialized state is unwound on failure.
 */
int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;
	bool changed;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	/* Publish the group before adding vlan 1, which looks it up. */
	rcu_assign_pointer(br->vlgrp, vg);
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

err_vlan_add:
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}
1078 
/* Initialize a port's vlan group when it joins the bridge: sync the
 * bridge's filtering state down via switchdev, set up hash/tunnel state
 * and install the bridge's default_pvid on the port.
 */
int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	/* Publish the group before nbp_vlan_add(), which looks it up. */
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		bool changed;

		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &changed, extack);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	/* Group was published — unpublish and wait out readers first. */
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);

	goto out;
}
1131 
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
		 bool *changed, struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		/* Pass the flags to the hardware bridge */
		ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
		if (ret && ret != -EOPNOTSUPP)
			return ret;
		*changed = __vlan_add_flags(vlan, flags);

		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	/* __vlan_add() links the entry to its master and sets up stats. */
	ret = __vlan_add(vlan, flags, extack);
	if (ret)
		kfree(vlan);
	else
		*changed = true;

	return ret;
}
1170 
1171 /* Must be protected by RTNL.
1172  * Must be called with vid in range from 1 to 4094 inclusive.
1173  */
1174 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
1175 {
1176 	struct net_bridge_vlan *v;
1177 
1178 	ASSERT_RTNL();
1179 
1180 	v = br_vlan_find(nbp_vlan_group(port), vid);
1181 	if (!v)
1182 		return -ENOENT;
1183 	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1184 	br_fdb_delete_by_port(port->br, port, vid, 0);
1185 
1186 	return __vlan_del(v);
1187 }
1188 
/* Remove all VLAN entries from a port and free its vlan group.
 * Must be protected by RTNL.
 */
void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(vg);
	/* unpublish the group, then wait for all in-flight RCU readers
	 * to finish before the backing memory is released
	 */
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
1201 
/* Sum the per-cpu counters of @v into @stats.
 * The u64_stats seqcount retry loop yields a consistent snapshot of each
 * cpu's counters even on 32-bit hosts where 64-bit loads can tear.
 */
void br_vlan_get_stats(const struct net_bridge_vlan *v,
		       struct br_vlan_stats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		struct br_vlan_stats *cpu_stats;
		unsigned int start;

		cpu_stats = per_cpu_ptr(v->stats, i);
		do {
			/* retried if a writer updated this cpu's counters
			 * while we were reading them
			 */
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rxpackets = cpu_stats->rx_packets;
			rxbytes = cpu_stats->rx_bytes;
			txbytes = cpu_stats->tx_bytes;
			txpackets = cpu_stats->tx_packets;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rxpackets;
		stats->rx_bytes += rxbytes;
		stats->tx_bytes += txbytes;
		stats->tx_packets += txpackets;
	}
}
1228 
1229 int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
1230 {
1231 	struct net_bridge_vlan_group *vg;
1232 	struct net_bridge_port *p;
1233 
1234 	ASSERT_RTNL();
1235 	p = br_port_get_check_rtnl(dev);
1236 	if (p)
1237 		vg = nbp_vlan_group(p);
1238 	else if (netif_is_bridge_master(dev))
1239 		vg = br_vlan_group(netdev_priv(dev));
1240 	else
1241 		return -EINVAL;
1242 
1243 	*p_pvid = br_get_pvid(vg);
1244 	return 0;
1245 }
1246 EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
1247 
1248 int br_vlan_get_info(const struct net_device *dev, u16 vid,
1249 		     struct bridge_vlan_info *p_vinfo)
1250 {
1251 	struct net_bridge_vlan_group *vg;
1252 	struct net_bridge_vlan *v;
1253 	struct net_bridge_port *p;
1254 
1255 	ASSERT_RTNL();
1256 	p = br_port_get_check_rtnl(dev);
1257 	if (p)
1258 		vg = nbp_vlan_group(p);
1259 	else if (netif_is_bridge_master(dev))
1260 		vg = br_vlan_group(netdev_priv(dev));
1261 	else
1262 		return -EINVAL;
1263 
1264 	v = br_vlan_find(vg, vid);
1265 	if (!v)
1266 		return -ENOENT;
1267 
1268 	p_vinfo->vid = vid;
1269 	p_vinfo->flags = v->flags;
1270 	return 0;
1271 }
1272 EXPORT_SYMBOL_GPL(br_vlan_get_info);
1273 
1274 static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
1275 {
1276 	return is_vlan_dev(dev) &&
1277 		!!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
1278 }
1279 
/* netdev_walk_all_upper_dev_rcu() adapter for br_vlan_is_bind_vlan_dev();
 * returning non-zero stops the walk at the first matching upper device.
 */
static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
				       __always_unused void *data)
{
	return br_vlan_is_bind_vlan_dev(dev);
}
1285 
1286 static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
1287 {
1288 	int found;
1289 
1290 	rcu_read_lock();
1291 	found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
1292 					      NULL);
1293 	rcu_read_unlock();
1294 
1295 	return !!found;
1296 }
1297 
/* Walk state used to find the bridge-binding vlan upper device that
 * carries a given vlan id.
 */
struct br_vlan_bind_walk_data {
	/* vlan id to match */
	u16 vid;
	/* matching device, or NULL when no match was found */
	struct net_device *result;
};
1302 
1303 static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
1304 					  void *data_in)
1305 {
1306 	struct br_vlan_bind_walk_data *data = data_in;
1307 	int found = 0;
1308 
1309 	if (br_vlan_is_bind_vlan_dev(dev) &&
1310 	    vlan_dev_priv(dev)->vlan_id == data->vid) {
1311 		data->result = dev;
1312 		found = 1;
1313 	}
1314 
1315 	return found;
1316 }
1317 
1318 static struct net_device *
1319 br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
1320 {
1321 	struct br_vlan_bind_walk_data data = {
1322 		.vid = vid,
1323 	};
1324 
1325 	rcu_read_lock();
1326 	netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
1327 				      &data);
1328 	rcu_read_unlock();
1329 
1330 	return data.result;
1331 }
1332 
1333 static bool br_vlan_is_dev_up(const struct net_device *dev)
1334 {
1335 	return  !!(dev->flags & IFF_UP) && netif_oper_up(dev);
1336 }
1337 
1338 static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
1339 				       struct net_device *vlan_dev)
1340 {
1341 	u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
1342 	struct net_bridge_vlan_group *vg;
1343 	struct net_bridge_port *p;
1344 	bool has_carrier = false;
1345 
1346 	if (!netif_carrier_ok(br->dev)) {
1347 		netif_carrier_off(vlan_dev);
1348 		return;
1349 	}
1350 
1351 	list_for_each_entry(p, &br->port_list, list) {
1352 		vg = nbp_vlan_group(p);
1353 		if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
1354 			has_carrier = true;
1355 			break;
1356 		}
1357 	}
1358 
1359 	if (has_carrier)
1360 		netif_carrier_on(vlan_dev);
1361 	else
1362 		netif_carrier_off(vlan_dev);
1363 }
1364 
1365 static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
1366 {
1367 	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
1368 	struct net_bridge_vlan *vlan;
1369 	struct net_device *vlan_dev;
1370 
1371 	list_for_each_entry(vlan, &vg->vlan_list, vlist) {
1372 		vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
1373 							   vlan->vid);
1374 		if (vlan_dev) {
1375 			if (br_vlan_is_dev_up(p->dev)) {
1376 				if (netif_carrier_ok(p->br->dev))
1377 					netif_carrier_on(vlan_dev);
1378 			} else {
1379 				br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1380 			}
1381 		}
1382 	}
1383 }
1384 
1385 static void br_vlan_upper_change(struct net_device *dev,
1386 				 struct net_device *upper_dev,
1387 				 bool linking)
1388 {
1389 	struct net_bridge *br = netdev_priv(dev);
1390 
1391 	if (!br_vlan_is_bind_vlan_dev(upper_dev))
1392 		return;
1393 
1394 	if (linking) {
1395 		br_vlan_set_vlan_dev_state(br, upper_dev);
1396 		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
1397 	} else {
1398 		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
1399 			      br_vlan_has_upper_bind_vlan_dev(dev));
1400 	}
1401 }
1402 
/* Walk state handed to br_vlan_link_state_change_fn() while iterating
 * the bridge's upper devices.
 */
struct br_vlan_link_state_walk_data {
	/* bridge whose link state change is being propagated */
	struct net_bridge *br;
};
1406 
1407 static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
1408 					void *data_in)
1409 {
1410 	struct br_vlan_link_state_walk_data *data = data_in;
1411 
1412 	if (br_vlan_is_bind_vlan_dev(vlan_dev))
1413 		br_vlan_set_vlan_dev_state(data->br, vlan_dev);
1414 
1415 	return 0;
1416 }
1417 
1418 static void br_vlan_link_state_change(struct net_device *dev,
1419 				      struct net_bridge *br)
1420 {
1421 	struct br_vlan_link_state_walk_data data = {
1422 		.br = br
1423 	};
1424 
1425 	rcu_read_lock();
1426 	netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
1427 				      &data);
1428 	rcu_read_unlock();
1429 }
1430 
1431 /* Must be protected by RTNL. */
1432 static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
1433 {
1434 	struct net_device *vlan_dev;
1435 
1436 	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1437 		return;
1438 
1439 	vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
1440 	if (vlan_dev)
1441 		br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1442 }
1443 
1444 /* Must be protected by RTNL. */
1445 void br_vlan_bridge_event(struct net_device *dev, unsigned long event,
1446 			  void *ptr)
1447 {
1448 	struct netdev_notifier_changeupper_info *info;
1449 	struct net_bridge *br;
1450 
1451 	switch (event) {
1452 	case NETDEV_CHANGEUPPER:
1453 		info = ptr;
1454 		br_vlan_upper_change(dev, info->upper_dev, info->linking);
1455 		break;
1456 
1457 	case NETDEV_CHANGE:
1458 	case NETDEV_UP:
1459 		br = netdev_priv(dev);
1460 		if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
1461 			return;
1462 		br_vlan_link_state_change(dev, br);
1463 		break;
1464 	}
1465 }
1466 
1467 /* Must be protected by RTNL. */
1468 void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
1469 {
1470 	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1471 		return;
1472 
1473 	switch (event) {
1474 	case NETDEV_CHANGE:
1475 	case NETDEV_DOWN:
1476 	case NETDEV_UP:
1477 		br_vlan_set_all_vlan_dev_state(p);
1478 		break;
1479 	}
1480 }
1481