xref: /openbmc/linux/net/bridge/br_netlink.c (revision 80483c3a)
1 /*
2  *	Bridge netlink control interface
3  *
4  *	Authors:
5  *	Stephen Hemminger		<shemminger@osdl.org>
6  *
7  *	This program is free software; you can redistribute it and/or
8  *	modify it under the terms of the GNU General Public License
9  *	as published by the Free Software Foundation; either version
10  *	2 of the License, or (at your option) any later version.
11  */
12 
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/etherdevice.h>
16 #include <net/rtnetlink.h>
17 #include <net/net_namespace.h>
18 #include <net/sock.h>
19 #include <uapi/linux/if_bridge.h>
20 
21 #include "br_private.h"
22 #include "br_private_stp.h"
23 
24 static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
25 				u32 filter_mask)
26 {
27 	struct net_bridge_vlan *v;
28 	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
29 	u16 flags, pvid;
30 	int num_vlans = 0;
31 
32 	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
33 		return 0;
34 
35 	pvid = br_get_pvid(vg);
36 	/* Count number of vlan infos */
37 	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
38 		flags = 0;
39 		/* only a context, bridge vlan not activated */
40 		if (!br_vlan_should_use(v))
41 			continue;
42 		if (v->vid == pvid)
43 			flags |= BRIDGE_VLAN_INFO_PVID;
44 
45 		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
46 			flags |= BRIDGE_VLAN_INFO_UNTAGGED;
47 
48 		if (vid_range_start == 0) {
49 			goto initvars;
50 		} else if ((v->vid - vid_range_end) == 1 &&
51 			flags == vid_range_flags) {
52 			vid_range_end = v->vid;
53 			continue;
54 		} else {
55 			if ((vid_range_end - vid_range_start) > 0)
56 				num_vlans += 2;
57 			else
58 				num_vlans += 1;
59 		}
60 initvars:
61 		vid_range_start = v->vid;
62 		vid_range_end = v->vid;
63 		vid_range_flags = flags;
64 	}
65 
66 	if (vid_range_start != 0) {
67 		if ((vid_range_end - vid_range_start) > 0)
68 			num_vlans += 2;
69 		else
70 			num_vlans += 1;
71 	}
72 
73 	return num_vlans;
74 }
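/* A worked example of the counting above, assuming none of the vlans is
 * the pvid: with vids 10, 11 and 12 all untagged plus a lone tagged
 * vid 20, the compressed dump needs three bridge_vlan_info entries
 * (vid 10 with RANGE_BEGIN, vid 12 with RANGE_END, vid 20 on its own),
 * so the function returns 3.
 */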
75 
76 static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
77 				 u32 filter_mask)
78 {
79 	int num_vlans;
80 
81 	if (!vg)
82 		return 0;
83 
84 	if (filter_mask & RTEXT_FILTER_BRVLAN)
85 		return vg->num_vlans;
86 
87 	rcu_read_lock();
88 	num_vlans = __get_num_vlan_infos(vg, filter_mask);
89 	rcu_read_unlock();
90 
91 	return num_vlans;
92 }
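/* Note on the two filter modes: with RTEXT_FILTER_BRVLAN every vlan is
 * dumped individually, so vg->num_vlans is the exact count, while
 * RTEXT_FILTER_BRVLAN_COMPRESSED folds consecutive vids with identical
 * flags into begin/end pairs counted by __get_num_vlan_infos() above.
 */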
93 
94 static size_t br_get_link_af_size_filtered(const struct net_device *dev,
95 					   u32 filter_mask)
96 {
97 	struct net_bridge_vlan_group *vg = NULL;
98 	struct net_bridge_port *p;
99 	struct net_bridge *br;
100 	int num_vlan_infos;
101 
102 	rcu_read_lock();
103 	if (br_port_exists(dev)) {
104 		p = br_port_get_rcu(dev);
105 		vg = nbp_vlan_group_rcu(p);
106 	} else if (dev->priv_flags & IFF_EBRIDGE) {
107 		br = netdev_priv(dev);
108 		vg = br_vlan_group_rcu(br);
109 	}
110 	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
111 	rcu_read_unlock();
112 
113 	/* Each VLAN is returned in bridge_vlan_info along with flags */
114 	return num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
115 }
116 
117 static inline size_t br_port_info_size(void)
118 {
119 	return nla_total_size(1)	/* IFLA_BRPORT_STATE  */
120 		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
121 		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
122 		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
123 		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
124 		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
125 		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
126 		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
127 		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
128 		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
129 		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
130 		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
131 		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
132 		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
133 		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
134 		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
135 		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
136 		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
137 		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
138 		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
139 		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
140 		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
141 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
142 		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
143 #endif
144 		+ 0;
145 }
146 
147 static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
148 {
149 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
150 		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
151 		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
152 		+ nla_total_size(4) /* IFLA_MASTER */
153 		+ nla_total_size(4) /* IFLA_MTU */
154 		+ nla_total_size(4) /* IFLA_LINK */
155 		+ nla_total_size(1) /* IFLA_OPERSTATE */
156 		+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
157 		+ nla_total_size(br_get_link_af_size_filtered(dev,
158 				 filter_mask)); /* IFLA_AF_SPEC */
159 }
160 
161 static int br_port_fill_attrs(struct sk_buff *skb,
162 			      const struct net_bridge_port *p)
163 {
164 	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
165 	u64 timerval;
166 
167 	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
168 	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
169 	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
170 	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
171 	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
172 	    nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) ||
173 	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
174 	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
175 	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) ||
176 	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
177 	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
178 		       !!(p->flags & BR_PROXYARP_WIFI)) ||
179 	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
180 		    &p->designated_root) ||
181 	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
182 		    &p->designated_bridge) ||
183 	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
184 	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
185 	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
186 	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
187 	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
188 		       p->topology_change_ack) ||
189 	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending))
190 		return -EMSGSIZE;
191 
192 	timerval = br_timer_value(&p->message_age_timer);
193 	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
194 			      IFLA_BRPORT_PAD))
195 		return -EMSGSIZE;
196 	timerval = br_timer_value(&p->forward_delay_timer);
197 	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
198 			      IFLA_BRPORT_PAD))
199 		return -EMSGSIZE;
200 	timerval = br_timer_value(&p->hold_timer);
201 	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
202 			      IFLA_BRPORT_PAD))
203 		return -EMSGSIZE;
204 
205 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
206 	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
207 		       p->multicast_router))
208 		return -EMSGSIZE;
209 #endif
210 
211 	return 0;
212 }
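/* The three port timers above are emitted with nla_put_u64_64bit() and
 * the IFLA_BRPORT_PAD attribute so that the 64-bit values stay naturally
 * aligned on architectures that cannot do unaligned 64-bit accesses.
 */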
213 
214 static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
215 				    u16 vid_end, u16 flags)
216 {
217 	struct bridge_vlan_info vinfo;
218 
219 	if ((vid_end - vid_start) > 0) {
220 		/* add range to skb */
221 		vinfo.vid = vid_start;
222 		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
223 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
224 			    sizeof(vinfo), &vinfo))
225 			goto nla_put_failure;
226 
227 		vinfo.vid = vid_end;
228 		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
229 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
230 			    sizeof(vinfo), &vinfo))
231 			goto nla_put_failure;
232 	} else {
233 		vinfo.vid = vid_start;
234 		vinfo.flags = flags;
235 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
236 			    sizeof(vinfo), &vinfo))
237 			goto nla_put_failure;
238 	}
239 
240 	return 0;
241 
242 nla_put_failure:
243 	return -EMSGSIZE;
244 }
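/* A sketch of what the helper above emits, e.g. for vids 100-200 with
 * flags f: two IFLA_BRIDGE_VLAN_INFO attributes,
 *   { .vid = 100, .flags = f | BRIDGE_VLAN_INFO_RANGE_BEGIN }
 *   { .vid = 200, .flags = f | BRIDGE_VLAN_INFO_RANGE_END }
 * whereas a single vid is emitted as one attribute with the plain flags.
 */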
245 
246 static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
247 					 struct net_bridge_vlan_group *vg)
248 {
249 	struct net_bridge_vlan *v;
250 	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
251 	u16 flags, pvid;
252 	int err = 0;
253 
254 	/* Pack an IFLA_BRIDGE_VLAN_INFO attribute for every vlan
255 	 * and mark the vlan info with begin and end flags
256 	 * if it represents a range
257 	 */
258 	pvid = br_get_pvid(vg);
259 	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
260 		flags = 0;
261 		if (!br_vlan_should_use(v))
262 			continue;
263 		if (v->vid == pvid)
264 			flags |= BRIDGE_VLAN_INFO_PVID;
265 
266 		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
267 			flags |= BRIDGE_VLAN_INFO_UNTAGGED;
268 
269 		if (vid_range_start == 0) {
270 			goto initvars;
271 		} else if ((v->vid - vid_range_end) == 1 &&
272 			flags == vid_range_flags) {
273 			vid_range_end = v->vid;
274 			continue;
275 		} else {
276 			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
277 						       vid_range_end,
278 						       vid_range_flags);
279 			if (err)
280 				return err;
281 		}
282 
283 initvars:
284 		vid_range_start = v->vid;
285 		vid_range_end = v->vid;
286 		vid_range_flags = flags;
287 	}
288 
289 	if (vid_range_start != 0) {
290 		/* Call it once more to send any leftover vlans */
291 		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
292 					       vid_range_end,
293 					       vid_range_flags);
294 		if (err)
295 			return err;
296 	}
297 
298 	return 0;
299 }
300 
301 static int br_fill_ifvlaninfo(struct sk_buff *skb,
302 			      struct net_bridge_vlan_group *vg)
303 {
304 	struct bridge_vlan_info vinfo;
305 	struct net_bridge_vlan *v;
306 	u16 pvid;
307 
308 	pvid = br_get_pvid(vg);
309 	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
310 		if (!br_vlan_should_use(v))
311 			continue;
312 
313 		vinfo.vid = v->vid;
314 		vinfo.flags = 0;
315 		if (v->vid == pvid)
316 			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
317 
318 		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
319 			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
320 
321 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
322 			    sizeof(vinfo), &vinfo))
323 			goto nla_put_failure;
324 	}
325 
326 	return 0;
327 
328 nla_put_failure:
329 	return -EMSGSIZE;
330 }
331 
332 /*
333  * Create one netlink message for one interface
334  * Contains port and master info as well as carrier and bridge state.
335  */
336 static int br_fill_ifinfo(struct sk_buff *skb,
337 			  struct net_bridge_port *port,
338 			  u32 pid, u32 seq, int event, unsigned int flags,
339 			  u32 filter_mask, const struct net_device *dev)
340 {
341 	struct net_bridge *br;
342 	struct ifinfomsg *hdr;
343 	struct nlmsghdr *nlh;
344 	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
345 
346 	if (port)
347 		br = port->br;
348 	else
349 		br = netdev_priv(dev);
350 
351 	br_debug(br, "br_fill_ifinfo event %d port %s master %s\n",
352 		     event, dev->name, br->dev->name);
353 
354 	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
355 	if (nlh == NULL)
356 		return -EMSGSIZE;
357 
358 	hdr = nlmsg_data(nlh);
359 	hdr->ifi_family = AF_BRIDGE;
360 	hdr->__ifi_pad = 0;
361 	hdr->ifi_type = dev->type;
362 	hdr->ifi_index = dev->ifindex;
363 	hdr->ifi_flags = dev_get_flags(dev);
364 	hdr->ifi_change = 0;
365 
366 	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
367 	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
368 	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
369 	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
370 	    (dev->addr_len &&
371 	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
372 	    (dev->ifindex != dev_get_iflink(dev) &&
373 	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
374 		goto nla_put_failure;
375 
376 	if (event == RTM_NEWLINK && port) {
377 		struct nlattr *nest
378 			= nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
379 
380 		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
381 			goto nla_put_failure;
382 		nla_nest_end(skb, nest);
383 	}
384 
385 	/* Check if the VID information is requested */
386 	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
387 	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
388 		struct net_bridge_vlan_group *vg;
389 		struct nlattr *af;
390 		int err;
391 
392 		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
393 		rcu_read_lock();
394 		if (port)
395 			vg = nbp_vlan_group_rcu(port);
396 		else
397 			vg = br_vlan_group_rcu(br);
398 
399 		if (!vg || !vg->num_vlans) {
400 			rcu_read_unlock();
401 			goto done;
402 		}
403 		af = nla_nest_start(skb, IFLA_AF_SPEC);
404 		if (!af) {
405 			rcu_read_unlock();
406 			goto nla_put_failure;
407 		}
408 		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
409 			err = br_fill_ifvlaninfo_compressed(skb, vg);
410 		else
411 			err = br_fill_ifvlaninfo(skb, vg);
412 		rcu_read_unlock();
413 		if (err)
414 			goto nla_put_failure;
415 		nla_nest_end(skb, af);
416 	}
417 
418 done:
419 	nlmsg_end(skb, nlh);
420 	return 0;
421 
422 nla_put_failure:
423 	nlmsg_cancel(skb, nlh);
424 	return -EMSGSIZE;
425 }
426 
427 /*
428  * Notify listeners of a change in port information
429  */
430 void br_ifinfo_notify(int event, struct net_bridge_port *port)
431 {
432 	struct net *net;
433 	struct sk_buff *skb;
434 	int err = -ENOBUFS;
435 	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
436 
437 	if (!port)
438 		return;
439 
440 	net = dev_net(port->dev);
441 	br_debug(port->br, "port %u(%s) event %d\n",
442 		 (unsigned int)port->port_no, port->dev->name, event);
443 
444 	skb = nlmsg_new(br_nlmsg_size(port->dev, filter), GFP_ATOMIC);
445 	if (skb == NULL)
446 		goto errout;
447 
448 	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, port->dev);
449 	if (err < 0) {
450 		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
451 		WARN_ON(err == -EMSGSIZE);
452 		kfree_skb(skb);
453 		goto errout;
454 	}
455 	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
456 	return;
457 errout:
458 	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
459 }
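/* A note on the GFP_ATOMIC allocation above: some callers, for instance
 * the STP enable/disable paths, appear to hold the bridge spinlock when
 * sending this notification, so a sleeping allocation would not be safe.
 */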
460 
461 
462 /*
463  * Dump information about all ports, in response to GETLINK
464  */
465 int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
466 	       struct net_device *dev, u32 filter_mask, int nlflags)
467 {
468 	struct net_bridge_port *port = br_port_get_rtnl(dev);
469 
470 	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
471 	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
472 		return 0;
473 
474 	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
475 			      filter_mask, dev);
476 }
477 
478 static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
479 			int cmd, struct bridge_vlan_info *vinfo)
480 {
481 	int err = 0;
482 
483 	switch (cmd) {
484 	case RTM_SETLINK:
485 		if (p) {
486 			/* if the MASTER flag is set this will act on the global
487 			 * per-VLAN entry as well
488 			 */
489 			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
490 			if (err)
491 				break;
492 		} else {
493 			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
494 			err = br_vlan_add(br, vinfo->vid, vinfo->flags);
495 		}
496 		break;
497 
498 	case RTM_DELLINK:
499 		if (p) {
500 			nbp_vlan_delete(p, vinfo->vid);
501 			if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
502 				br_vlan_delete(p->br, vinfo->vid);
503 		} else {
504 			br_vlan_delete(br, vinfo->vid);
505 		}
506 		break;
507 	}
508 
509 	return err;
510 }
511 
512 static int br_afspec(struct net_bridge *br,
513 		     struct net_bridge_port *p,
514 		     struct nlattr *af_spec,
515 		     int cmd)
516 {
517 	struct bridge_vlan_info *vinfo_start = NULL;
518 	struct bridge_vlan_info *vinfo = NULL;
519 	struct nlattr *attr;
520 	int err = 0;
521 	int rem;
522 
523 	nla_for_each_nested(attr, af_spec, rem) {
524 		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
525 			continue;
526 		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
527 			return -EINVAL;
528 		vinfo = nla_data(attr);
529 		if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
530 			return -EINVAL;
531 		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
532 			if (vinfo_start)
533 				return -EINVAL;
534 			vinfo_start = vinfo;
535 			/* don't allow range of pvids */
536 			if (vinfo_start->flags & BRIDGE_VLAN_INFO_PVID)
537 				return -EINVAL;
538 			continue;
539 		}
540 
541 		if (vinfo_start) {
542 			struct bridge_vlan_info tmp_vinfo;
543 			int v;
544 
545 			if (!(vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END))
546 				return -EINVAL;
547 
548 			if (vinfo->vid <= vinfo_start->vid)
549 				return -EINVAL;
550 
551 			memcpy(&tmp_vinfo, vinfo_start,
552 			       sizeof(struct bridge_vlan_info));
553 
554 			for (v = vinfo_start->vid; v <= vinfo->vid; v++) {
555 				tmp_vinfo.vid = v;
556 				err = br_vlan_info(br, p, cmd, &tmp_vinfo);
557 				if (err)
558 					break;
559 			}
560 			vinfo_start = NULL;
561 		} else {
562 			err = br_vlan_info(br, p, cmd, vinfo);
563 		}
564 		if (err)
565 			break;
566 	}
567 
568 	return err;
569 }
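/* Range handling sketch: a request carrying vid 100 with RANGE_BEGIN
 * followed by vid 110 with RANGE_END is expanded into individual
 * br_vlan_info() calls for vids 100 through 110. A typical userspace
 * source of such a pair would be something like
 *   bridge vlan add dev eth0 vid 100-110
 * (an illustrative iproute2 invocation, not taken from this file).
 */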
570 
571 static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
572 	[IFLA_BRPORT_STATE]	= { .type = NLA_U8 },
573 	[IFLA_BRPORT_COST]	= { .type = NLA_U32 },
574 	[IFLA_BRPORT_PRIORITY]	= { .type = NLA_U16 },
575 	[IFLA_BRPORT_MODE]	= { .type = NLA_U8 },
576 	[IFLA_BRPORT_GUARD]	= { .type = NLA_U8 },
577 	[IFLA_BRPORT_PROTECT]	= { .type = NLA_U8 },
578 	[IFLA_BRPORT_FAST_LEAVE] = { .type = NLA_U8 },
579 	[IFLA_BRPORT_LEARNING]	= { .type = NLA_U8 },
580 	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
581 	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
582 	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
583 	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
584 };
585 
586 /* Change the state of the port and notify spanning tree */
587 static int br_set_port_state(struct net_bridge_port *p, u8 state)
588 {
589 	if (state > BR_STATE_BLOCKING)
590 		return -EINVAL;
591 
592 	/* if kernel STP is running, don't allow changes */
593 	if (p->br->stp_enabled == BR_KERNEL_STP)
594 		return -EBUSY;
595 
596 	/* if the device is not up, the change is not allowed;
597 	 * if the link is not present, the only allowable state is disabled
598 	 */
599 	if (!netif_running(p->dev) ||
600 	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
601 		return -ENETDOWN;
602 
603 	br_set_state(p, state);
604 	br_port_state_selection(p->br);
605 	return 0;
606 }
607 
608 /* Set or clear port flags based on the attribute */
609 static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
610 			   int attrtype, unsigned long mask)
611 {
612 	if (tb[attrtype]) {
613 		u8 flag = nla_get_u8(tb[attrtype]);
614 		if (flag)
615 			p->flags |= mask;
616 		else
617 			p->flags &= ~mask;
618 	}
619 }
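/* For example, an RTM_SETLINK request carrying IFLA_BRPORT_LEARNING = 0
 * clears BR_LEARNING on the port, while attributes that are absent from
 * the message leave the corresponding flag untouched.
 */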
620 
621 /* Process bridge protocol info on port */
622 static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
623 {
624 	int err;
625 	unsigned long old_flags = p->flags;
626 
627 	br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
628 	br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
629 	br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
630 	br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
631 	br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
632 	br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
633 	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
634 	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
635 
636 	if (tb[IFLA_BRPORT_COST]) {
637 		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
638 		if (err)
639 			return err;
640 	}
641 
642 	if (tb[IFLA_BRPORT_PRIORITY]) {
643 		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
644 		if (err)
645 			return err;
646 	}
647 
648 	if (tb[IFLA_BRPORT_STATE]) {
649 		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
650 		if (err)
651 			return err;
652 	}
653 
654 	if (tb[IFLA_BRPORT_FLUSH])
655 		br_fdb_delete_by_port(p->br, p, 0, 0);
656 
657 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
658 	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
659 		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);
660 
661 		err = br_multicast_set_port_router(p, mcast_router);
662 		if (err)
663 			return err;
664 	}
665 #endif
666 	br_port_flags_change(p, old_flags ^ p->flags);
667 	return 0;
668 }
669 
670 /* Change state and parameters on port. */
671 int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
672 {
673 	struct nlattr *protinfo;
674 	struct nlattr *afspec;
675 	struct net_bridge_port *p;
676 	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
677 	int err = 0;
678 
679 	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
680 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
681 	if (!protinfo && !afspec)
682 		return 0;
683 
684 	p = br_port_get_rtnl(dev);
685 	/* We want to accept dev as the bridge itself if the AF_SPEC
686 	 * is set, to see if someone is setting vlan info on the bridge
687 	 */
688 	if (!p && !afspec)
689 		return -EINVAL;
690 
691 	if (p && protinfo) {
692 		if (protinfo->nla_type & NLA_F_NESTED) {
693 			err = nla_parse_nested(tb, IFLA_BRPORT_MAX,
694 					       protinfo, br_port_policy);
695 			if (err)
696 				return err;
697 
698 			spin_lock_bh(&p->br->lock);
699 			err = br_setport(p, tb);
700 			spin_unlock_bh(&p->br->lock);
701 		} else {
702 			/* Binary compatibility with old RSTP */
703 			if (nla_len(protinfo) < sizeof(u8))
704 				return -EINVAL;
705 
706 			spin_lock_bh(&p->br->lock);
707 			err = br_set_port_state(p, nla_get_u8(protinfo));
708 			spin_unlock_bh(&p->br->lock);
709 		}
710 		if (err)
711 			goto out;
712 	}
713 
714 	if (afspec) {
715 		err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
716 				afspec, RTM_SETLINK);
717 	}
718 
719 	if (err == 0)
720 		br_ifinfo_notify(RTM_NEWLINK, p);
721 out:
722 	return err;
723 }
724 
725 /* Delete port information */
726 int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
727 {
728 	struct nlattr *afspec;
729 	struct net_bridge_port *p;
730 	int err = 0;
731 
732 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
733 	if (!afspec)
734 		return 0;
735 
736 	p = br_port_get_rtnl(dev);
737 	/* We want to accept dev as bridge itself as well */
738 	if (!p && !(dev->priv_flags & IFF_EBRIDGE))
739 		return -EINVAL;
740 
741 	err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
742 			afspec, RTM_DELLINK);
743 	if (err == 0)
744 		/* Send RTM_NEWLINK because userspace
745 		 * expects RTM_NEWLINK for vlan deletions
746 		 */
747 		br_ifinfo_notify(RTM_NEWLINK, p);
748 
749 	return err;
750 }
751 static int br_validate(struct nlattr *tb[], struct nlattr *data[])
752 {
753 	if (tb[IFLA_ADDRESS]) {
754 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
755 			return -EINVAL;
756 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
757 			return -EADDRNOTAVAIL;
758 	}
759 
760 	if (!data)
761 		return 0;
762 
763 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
764 	if (data[IFLA_BR_VLAN_PROTOCOL]) {
765 		switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
766 		case htons(ETH_P_8021Q):
767 		case htons(ETH_P_8021AD):
768 			break;
769 		default:
770 			return -EPROTONOSUPPORT;
771 		}
772 	}
773 #endif
774 
775 	return 0;
776 }
777 
778 static int br_dev_newlink(struct net *src_net, struct net_device *dev,
779 			  struct nlattr *tb[], struct nlattr *data[])
780 {
781 	struct net_bridge *br = netdev_priv(dev);
782 
783 	if (tb[IFLA_ADDRESS]) {
784 		spin_lock_bh(&br->lock);
785 		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
786 		spin_unlock_bh(&br->lock);
787 	}
788 
789 	return register_netdevice(dev);
790 }
791 
792 static int br_port_slave_changelink(struct net_device *brdev,
793 				    struct net_device *dev,
794 				    struct nlattr *tb[],
795 				    struct nlattr *data[])
796 {
797 	struct net_bridge *br = netdev_priv(brdev);
798 	int ret;
799 
800 	if (!data)
801 		return 0;
802 
803 	spin_lock_bh(&br->lock);
804 	ret = br_setport(br_port_get_rtnl(dev), data);
805 	spin_unlock_bh(&br->lock);
806 
807 	return ret;
808 }
809 
810 static int br_port_fill_slave_info(struct sk_buff *skb,
811 				   const struct net_device *brdev,
812 				   const struct net_device *dev)
813 {
814 	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
815 }
816 
817 static size_t br_port_get_slave_size(const struct net_device *brdev,
818 				     const struct net_device *dev)
819 {
820 	return br_port_info_size();
821 }
822 
823 static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
824 	[IFLA_BR_FORWARD_DELAY]	= { .type = NLA_U32 },
825 	[IFLA_BR_HELLO_TIME]	= { .type = NLA_U32 },
826 	[IFLA_BR_MAX_AGE]	= { .type = NLA_U32 },
827 	[IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
828 	[IFLA_BR_STP_STATE] = { .type = NLA_U32 },
829 	[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
830 	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
831 	[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
832 	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
833 	[IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
834 				 .len  = ETH_ALEN },
835 	[IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
836 	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
837 	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
838 	[IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
839 	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
840 	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
841 	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
842 	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
843 	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
844 	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
845 	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
846 	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
847 	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
848 	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
849 	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
850 	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
851 	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
852 	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
853 	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
854 	[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
855 };
856 
857 static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
858 			 struct nlattr *data[])
859 {
860 	struct net_bridge *br = netdev_priv(brdev);
861 	int err;
862 
863 	if (!data)
864 		return 0;
865 
866 	if (data[IFLA_BR_FORWARD_DELAY]) {
867 		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
868 		if (err)
869 			return err;
870 	}
871 
872 	if (data[IFLA_BR_HELLO_TIME]) {
873 		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
874 		if (err)
875 			return err;
876 	}
877 
878 	if (data[IFLA_BR_MAX_AGE]) {
879 		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
880 		if (err)
881 			return err;
882 	}
883 
884 	if (data[IFLA_BR_AGEING_TIME]) {
885 		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
886 		if (err)
887 			return err;
888 	}
889 
890 	if (data[IFLA_BR_STP_STATE]) {
891 		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);
892 
893 		br_stp_set_enabled(br, stp_enabled);
894 	}
895 
896 	if (data[IFLA_BR_PRIORITY]) {
897 		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);
898 
899 		br_stp_set_bridge_priority(br, priority);
900 	}
901 
902 	if (data[IFLA_BR_VLAN_FILTERING]) {
903 		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);
904 
905 		err = __br_vlan_filter_toggle(br, vlan_filter);
906 		if (err)
907 			return err;
908 	}
909 
910 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
911 	if (data[IFLA_BR_VLAN_PROTOCOL]) {
912 		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);
913 
914 		err = __br_vlan_set_proto(br, vlan_proto);
915 		if (err)
916 			return err;
917 	}
918 
919 	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
920 		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
921 
922 		err = __br_vlan_set_default_pvid(br, defpvid);
923 		if (err)
924 			return err;
925 	}
926 
927 	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
928 		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);
929 
930 		err = br_vlan_set_stats(br, vlan_stats);
931 		if (err)
932 			return err;
933 	}
934 #endif
935 
936 	if (data[IFLA_BR_GROUP_FWD_MASK]) {
937 		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);
938 
939 		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
940 			return -EINVAL;
941 		br->group_fwd_mask = fwd_mask;
942 	}
943 
944 	if (data[IFLA_BR_GROUP_ADDR]) {
945 		u8 new_addr[ETH_ALEN];
946 
947 		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
948 			return -EINVAL;
949 		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
950 		if (!is_link_local_ether_addr(new_addr))
951 			return -EINVAL;
952 		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
953 		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
954 		    new_addr[5] == 3)		/* 802.1X PAE address */
955 			return -EINVAL;
956 		spin_lock_bh(&br->lock);
957 		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
958 		spin_unlock_bh(&br->lock);
959 		br->group_addr_set = true;
960 		br_recalculate_fwd_mask(br);
961 	}
962 
963 	if (data[IFLA_BR_FDB_FLUSH])
964 		br_fdb_flush(br);
965 
966 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
967 	if (data[IFLA_BR_MCAST_ROUTER]) {
968 		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);
969 
970 		err = br_multicast_set_router(br, multicast_router);
971 		if (err)
972 			return err;
973 	}
974 
975 	if (data[IFLA_BR_MCAST_SNOOPING]) {
976 		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
977 
978 		err = br_multicast_toggle(br, mcast_snooping);
979 		if (err)
980 			return err;
981 	}
982 
983 	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
984 		u8 val;
985 
986 		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
987 		br->multicast_query_use_ifaddr = !!val;
988 	}
989 
990 	if (data[IFLA_BR_MCAST_QUERIER]) {
991 		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);
992 
993 		err = br_multicast_set_querier(br, mcast_querier);
994 		if (err)
995 			return err;
996 	}
997 
998 	if (data[IFLA_BR_MCAST_HASH_ELASTICITY]) {
999 		u32 val = nla_get_u32(data[IFLA_BR_MCAST_HASH_ELASTICITY]);
1000 
1001 		br->hash_elasticity = val;
1002 	}
1003 
1004 	if (data[IFLA_BR_MCAST_HASH_MAX]) {
1005 		u32 hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);
1006 
1007 		err = br_multicast_set_hash_max(br, hash_max);
1008 		if (err)
1009 			return err;
1010 	}
1011 
1012 	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
1013 		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);
1014 
1015 		br->multicast_last_member_count = val;
1016 	}
1017 
1018 	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
1019 		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);
1020 
1021 		br->multicast_startup_query_count = val;
1022 	}
1023 
1024 	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
1025 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);
1026 
1027 		br->multicast_last_member_interval = clock_t_to_jiffies(val);
1028 	}
1029 
1030 	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
1031 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);
1032 
1033 		br->multicast_membership_interval = clock_t_to_jiffies(val);
1034 	}
1035 
1036 	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
1037 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);
1038 
1039 		br->multicast_querier_interval = clock_t_to_jiffies(val);
1040 	}
1041 
1042 	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
1043 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
1044 
1045 		br->multicast_query_interval = clock_t_to_jiffies(val);
1046 	}
1047 
1048 	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
1049 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);
1050 
1051 		br->multicast_query_response_interval = clock_t_to_jiffies(val);
1052 	}
1053 
1054 	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
1055 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
1056 
1057 		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
1058 	}
1059 
1060 	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
1061 		__u8 mcast_stats;
1062 
1063 		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
1064 		br->multicast_stats_enabled = !!mcast_stats;
1065 	}
1066 #endif
1067 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1068 	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
1069 		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);
1070 
1071 		br->nf_call_iptables = val ? true : false;
1072 	}
1073 
1074 	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
1075 		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);
1076 
1077 		br->nf_call_ip6tables = val ? true : false;
1078 	}
1079 
1080 	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
1081 		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);
1082 
1083 		br->nf_call_arptables = val ? true : false;
1084 	}
1085 #endif
1086 
1087 	return 0;
1088 }
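/* These attributes are typically driven from userspace via RTM_NEWLINK
 * on the bridge device; with iproute2 something like
 *   ip link set dev br0 type bridge stp_state 1 vlan_filtering 1
 * would exercise the IFLA_BR_STP_STATE and IFLA_BR_VLAN_FILTERING
 * branches above (illustrative command, not part of this file).
 */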
1089 
1090 static size_t br_get_size(const struct net_device *brdev)
1091 {
1092 	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY  */
1093 	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
1094 	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
1095 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_AGEING_TIME */
1096 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_STP_STATE */
1097 	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_PRIORITY */
1098 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_VLAN_FILTERING */
1099 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
1100 	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
1101 	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_VLAN_DEFAULT_PVID */
1102 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_VLAN_STATS_ENABLED */
1103 #endif
1104 	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_GROUP_FWD_MASK */
1105 	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_ROOT_ID */
1106 	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_BRIDGE_ID */
1107 	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_ROOT_PORT */
1108 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_ROOT_PATH_COST */
1109 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE */
1110 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
1111 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
1112 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
1113 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
1114 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
1115 	       nla_total_size(ETH_ALEN) +       /* IFLA_BR_GROUP_ADDR */
1116 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1117 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_ROUTER */
1118 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_SNOOPING */
1119 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_QUERY_USE_IFADDR */
1120 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_QUERIER */
1121 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_STATS_ENABLED */
1122 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_ELASTICITY */
1123 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_MAX */
1124 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
1125 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
1126 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
1127 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
1128 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
1129 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
1130 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
1131 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
1132 #endif
1133 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1134 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_IPTABLES */
1135 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_IP6TABLES */
1136 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_ARPTABLES */
1137 #endif
1138 	       0;
1139 }
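/* The size computed here has to stay in sync with what br_fill_info()
 * actually puts in the skb; an undersized estimate would make the
 * nla_put*() calls below fail with -EMSGSIZE.
 */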
1140 
1141 static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
1142 {
1143 	struct net_bridge *br = netdev_priv(brdev);
1144 	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
1145 	u32 hello_time = jiffies_to_clock_t(br->hello_time);
1146 	u32 age_time = jiffies_to_clock_t(br->max_age);
1147 	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
1148 	u32 stp_enabled = br->stp_enabled;
1149 	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
1150 	u8 vlan_enabled = br_vlan_enabled(br);
1151 	u64 clockval;
1152 
1153 	clockval = br_timer_value(&br->hello_timer);
1154 	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
1155 		return -EMSGSIZE;
1156 	clockval = br_timer_value(&br->tcn_timer);
1157 	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
1158 		return -EMSGSIZE;
1159 	clockval = br_timer_value(&br->topology_change_timer);
1160 	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
1161 			      IFLA_BR_PAD))
1162 		return -EMSGSIZE;
1163 	clockval = br_timer_value(&br->gc_timer);
1164 	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
1165 		return -EMSGSIZE;
1166 
1167 	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
1168 	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
1169 	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
1170 	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
1171 	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
1172 	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
1173 	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
1174 	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
1175 	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
1176 		    &br->bridge_id) ||
1177 	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
1178 		    &br->designated_root) ||
1179 	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
1180 	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
1181 	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
1182 	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
1183 		       br->topology_change_detected) ||
1184 	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr))
1185 		return -EMSGSIZE;
1186 
1187 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
1188 	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
1189 	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
1190 	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, br->vlan_stats_enabled))
1191 		return -EMSGSIZE;
1192 #endif
1193 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1194 	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
1195 	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) ||
1196 	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
1197 		       br->multicast_query_use_ifaddr) ||
1198 	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
1199 	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
1200 		       br->multicast_stats_enabled) ||
1201 	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
1202 			br->hash_elasticity) ||
1203 	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
1204 	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
1205 			br->multicast_last_member_count) ||
1206 	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
1207 			br->multicast_startup_query_count))
1208 		return -EMSGSIZE;
1209 
1210 	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
1211 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
1212 			      IFLA_BR_PAD))
1213 		return -EMSGSIZE;
1214 	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
1215 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
1216 			      IFLA_BR_PAD))
1217 		return -EMSGSIZE;
1218 	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
1219 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
1220 			      IFLA_BR_PAD))
1221 		return -EMSGSIZE;
1222 	clockval = jiffies_to_clock_t(br->multicast_query_interval);
1223 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
1224 			      IFLA_BR_PAD))
1225 		return -EMSGSIZE;
1226 	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
1227 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
1228 			      IFLA_BR_PAD))
1229 		return -EMSGSIZE;
1230 	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
1231 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
1232 			      IFLA_BR_PAD))
1233 		return -EMSGSIZE;
1234 #endif
1235 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1236 	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
1237 		       br->nf_call_iptables ? 1 : 0) ||
1238 	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
1239 		       br->nf_call_ip6tables ? 1 : 0) ||
1240 	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
1241 		       br->nf_call_arptables ? 1 : 0))
1242 		return -EMSGSIZE;
1243 #endif
1244 
1245 	return 0;
1246 }
1247 
1248 static size_t bridge_get_linkxstats_size(const struct net_device *dev)
1249 {
1250 	struct net_bridge *br = netdev_priv(dev);
1251 	struct net_bridge_vlan_group *vg;
1252 	struct net_bridge_vlan *v;
1253 	int numvls = 0;
1254 
1255 	vg = br_vlan_group(br);
1256 	if (vg) {
1257 		/* we need to count all, even placeholder entries */
1258 		list_for_each_entry(v, &vg->vlan_list, vlist)
1259 			numvls++;
1260 	}
1261 
1262 	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
1263 	       nla_total_size(sizeof(struct br_mcast_stats)) +
1264 	       nla_total_size(0);
1265 }
1266 
1267 static size_t brport_get_linkxstats_size(const struct net_device *dev)
1268 {
1269 	return nla_total_size(sizeof(struct br_mcast_stats)) +
1270 	       nla_total_size(0);
1271 }
1272 
1273 static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
1274 {
1275 	size_t retsize = 0;
1276 
1277 	switch (attr) {
1278 	case IFLA_STATS_LINK_XSTATS:
1279 		retsize = bridge_get_linkxstats_size(dev);
1280 		break;
1281 	case IFLA_STATS_LINK_XSTATS_SLAVE:
1282 		retsize = brport_get_linkxstats_size(dev);
1283 		break;
1284 	}
1285 
1286 	return retsize;
1287 }
1288 
1289 static int bridge_fill_linkxstats(struct sk_buff *skb,
1290 				  const struct net_device *dev,
1291 				  int *prividx)
1292 {
1293 	struct net_bridge *br = netdev_priv(dev);
1294 	struct nlattr *nla __maybe_unused;
1295 	struct net_bridge_vlan_group *vg;
1296 	struct net_bridge_vlan *v;
1297 	struct nlattr *nest;
1298 	int vl_idx = 0;
1299 
1300 	nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
1301 	if (!nest)
1302 		return -EMSGSIZE;
1303 
1304 	vg = br_vlan_group(br);
1305 	if (vg) {
1306 		list_for_each_entry(v, &vg->vlan_list, vlist) {
1307 			struct bridge_vlan_xstats vxi;
1308 			struct br_vlan_stats stats;
1309 
1310 			if (++vl_idx < *prividx)
1311 				continue;
1312 			memset(&vxi, 0, sizeof(vxi));
1313 			vxi.vid = v->vid;
1314 			br_vlan_get_stats(v, &stats);
1315 			vxi.rx_bytes = stats.rx_bytes;
1316 			vxi.rx_packets = stats.rx_packets;
1317 			vxi.tx_bytes = stats.tx_bytes;
1318 			vxi.tx_packets = stats.tx_packets;
1319 
1320 			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
1321 				goto nla_put_failure;
1322 		}
1323 	}
1324 
1325 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1326 	if (++vl_idx >= *prividx) {
1327 		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
1328 					sizeof(struct br_mcast_stats),
1329 					BRIDGE_XSTATS_PAD);
1330 		if (!nla)
1331 			goto nla_put_failure;
1332 		br_multicast_get_stats(br, NULL, nla_data(nla));
1333 	}
1334 #endif
1335 	nla_nest_end(skb, nest);
1336 	*prividx = 0;
1337 
1338 	return 0;
1339 
1340 nla_put_failure:
1341 	nla_nest_end(skb, nest);
1342 	*prividx = vl_idx;
1343 
1344 	return -EMSGSIZE;
1345 }
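/* *prividx records how far this fill got before running out of room, so
 * the next attempt can skip the vlan entries that were already emitted
 * and resume from that point; a complete fill resets it to 0.
 */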
1346 
1347 static int brport_fill_linkxstats(struct sk_buff *skb,
1348 				  const struct net_device *dev,
1349 				  int *prividx)
1350 {
1351 	struct net_bridge_port *p = br_port_get_rtnl(dev);
1352 	struct nlattr *nla __maybe_unused;
1353 	struct nlattr *nest;
1354 
1355 	if (!p)
1356 		return 0;
1357 
1358 	nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
1359 	if (!nest)
1360 		return -EMSGSIZE;
1361 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1362 	nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
1363 				sizeof(struct br_mcast_stats),
1364 				BRIDGE_XSTATS_PAD);
1365 	if (!nla) {
1366 		nla_nest_end(skb, nest);
1367 		return -EMSGSIZE;
1368 	}
1369 	br_multicast_get_stats(p->br, p, nla_data(nla));
1370 #endif
1371 	nla_nest_end(skb, nest);
1372 
1373 	return 0;
1374 }
1375 
1376 static int br_fill_linkxstats(struct sk_buff *skb, const struct net_device *dev,
1377 			      int *prividx, int attr)
1378 {
1379 	int ret = -EINVAL;
1380 
1381 	switch (attr) {
1382 	case IFLA_STATS_LINK_XSTATS:
1383 		ret = bridge_fill_linkxstats(skb, dev, prividx);
1384 		break;
1385 	case IFLA_STATS_LINK_XSTATS_SLAVE:
1386 		ret = brport_fill_linkxstats(skb, dev, prividx);
1387 		break;
1388 	}
1389 
1390 	return ret;
1391 }
1392 
1393 static struct rtnl_af_ops br_af_ops __read_mostly = {
1394 	.family			= AF_BRIDGE,
1395 	.get_link_af_size	= br_get_link_af_size_filtered,
1396 };
1397 
1398 struct rtnl_link_ops br_link_ops __read_mostly = {
1399 	.kind			= "bridge",
1400 	.priv_size		= sizeof(struct net_bridge),
1401 	.setup			= br_dev_setup,
1402 	.maxtype		= IFLA_BR_MAX,
1403 	.policy			= br_policy,
1404 	.validate		= br_validate,
1405 	.newlink		= br_dev_newlink,
1406 	.changelink		= br_changelink,
1407 	.dellink		= br_dev_delete,
1408 	.get_size		= br_get_size,
1409 	.fill_info		= br_fill_info,
1410 	.fill_linkxstats	= br_fill_linkxstats,
1411 	.get_linkxstats_size	= br_get_linkxstats_size,
1412 
1413 	.slave_maxtype		= IFLA_BRPORT_MAX,
1414 	.slave_policy		= br_port_policy,
1415 	.slave_changelink	= br_port_slave_changelink,
1416 	.get_slave_size		= br_port_get_slave_size,
1417 	.fill_slave_info	= br_port_fill_slave_info,
1418 };
1419 
1420 int __init br_netlink_init(void)
1421 {
1422 	int err;
1423 
1424 	br_mdb_init();
1425 	rtnl_af_register(&br_af_ops);
1426 
1427 	err = rtnl_link_register(&br_link_ops);
1428 	if (err)
1429 		goto out_af;
1430 
1431 	return 0;
1432 
1433 out_af:
1434 	rtnl_af_unregister(&br_af_ops);
1435 	br_mdb_uninit();
1436 	return err;
1437 }
1438 
1439 void br_netlink_fini(void)
1440 {
1441 	br_mdb_uninit();
1442 	rtnl_af_unregister(&br_af_ops);
1443 	rtnl_link_unregister(&br_link_ops);
1444 }
1445