1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Bridge netlink control interface
4  *
5  *	Authors:
6  *	Stephen Hemminger		<shemminger@osdl.org>
7  */
8 
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
11 #include <linux/etherdevice.h>
12 #include <net/rtnetlink.h>
13 #include <net/net_namespace.h>
14 #include <net/sock.h>
15 #include <uapi/linux/if_bridge.h>
16 
17 #include "br_private.h"
18 #include "br_private_stp.h"
19 #include "br_private_cfm.h"
20 #include "br_private_tunnel.h"
21 #include "br_private_mcast_eht.h"
22 
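/* Count how many bridge_vlan_info entries a compressed VLAN dump needs:
 * consecutive VIDs with identical flags are folded into a range, which
 * is reported as two entries (RANGE_BEGIN + RANGE_END), while a lone VID
 * takes one.  Only meaningful for RTEXT_FILTER_BRVLAN_COMPRESSED.
 */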
23 static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
24 				u32 filter_mask)
25 {
26 	struct net_bridge_vlan *v;
27 	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
28 	u16 flags, pvid;
29 	int num_vlans = 0;
30 
31 	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
32 		return 0;
33 
34 	pvid = br_get_pvid(vg);
35 	/* Count number of vlan infos */
36 	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
37 		flags = 0;
38 		/* only a context, bridge vlan not activated */
39 		if (!br_vlan_should_use(v))
40 			continue;
41 		if (v->vid == pvid)
42 			flags |= BRIDGE_VLAN_INFO_PVID;
43 
44 		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
45 			flags |= BRIDGE_VLAN_INFO_UNTAGGED;
46 
47 		if (vid_range_start == 0) {
48 			goto initvars;
49 		} else if ((v->vid - vid_range_end) == 1 &&
50 			flags == vid_range_flags) {
51 			vid_range_end = v->vid;
52 			continue;
53 		} else {
54 			if ((vid_range_end - vid_range_start) > 0)
55 				num_vlans += 2;
56 			else
57 				num_vlans += 1;
58 		}
59 initvars:
60 		vid_range_start = v->vid;
61 		vid_range_end = v->vid;
62 		vid_range_flags = flags;
63 	}
64 
65 	if (vid_range_start != 0) {
66 		if ((vid_range_end - vid_range_start) > 0)
67 			num_vlans += 2;
68 		else
69 			num_vlans += 1;
70 	}
71 
72 	return num_vlans;
73 }
74 
75 static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
76 				 u32 filter_mask)
77 {
78 	int num_vlans;
79 
80 	if (!vg)
81 		return 0;
82 
83 	if (filter_mask & RTEXT_FILTER_BRVLAN)
84 		return vg->num_vlans;
85 
86 	rcu_read_lock();
87 	num_vlans = __get_num_vlan_infos(vg, filter_mask);
88 	rcu_read_unlock();
89 
90 	return num_vlans;
91 }
92 
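/* Estimate the IFLA_AF_SPEC payload for one bridge or port device:
 * per-VLAN bridge_vlan_info attributes, optional VLAN tunnel and MST
 * info, and, when RTEXT_FILTER_CFM_STATUS is requested on a bridge
 * master, the per-MEP and per-peer-MEP CFM status attributes.
 */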
93 static size_t br_get_link_af_size_filtered(const struct net_device *dev,
94 					   u32 filter_mask)
95 {
96 	struct net_bridge_vlan_group *vg = NULL;
97 	struct net_bridge_port *p = NULL;
98 	struct net_bridge *br = NULL;
99 	u32 num_cfm_peer_mep_infos;
100 	u32 num_cfm_mep_infos;
101 	size_t vinfo_sz = 0;
102 	int num_vlan_infos;
103 
104 	rcu_read_lock();
105 	if (netif_is_bridge_port(dev)) {
106 		p = br_port_get_check_rcu(dev);
107 		if (p)
108 			vg = nbp_vlan_group_rcu(p);
109 	} else if (netif_is_bridge_master(dev)) {
110 		br = netdev_priv(dev);
111 		vg = br_vlan_group_rcu(br);
112 	}
113 	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
114 	rcu_read_unlock();
115 
116 	if (p && (p->flags & BR_VLAN_TUNNEL))
117 		vinfo_sz += br_get_vlan_tunnel_info_size(vg);
118 
119 	/* Each VLAN is returned in bridge_vlan_info along with flags */
120 	vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
121 
122 	if (p && vg && (filter_mask & RTEXT_FILTER_MST))
123 		vinfo_sz += br_mst_info_size(vg);
124 
125 	if (!(filter_mask & RTEXT_FILTER_CFM_STATUS))
126 		return vinfo_sz;
127 
128 	if (!br)
129 		return vinfo_sz;
130 
131 	/* CFM status info must be added */
132 	br_cfm_mep_count(br, &num_cfm_mep_infos);
133 	br_cfm_peer_mep_count(br, &num_cfm_peer_mep_infos);
134 
135 	vinfo_sz += nla_total_size(0);	/* IFLA_BRIDGE_CFM */
136 	/* For each status struct the MEP instance (u32) is added */
137 	/* MEP instance (u32) + br_cfm_mep_status */
138 	vinfo_sz += num_cfm_mep_infos *
139 		     /* IFLA_BRIDGE_CFM_MEP_STATUS_INSTANCE */
140 		    (nla_total_size(sizeof(u32))
141 		     /* IFLA_BRIDGE_CFM_MEP_STATUS_OPCODE_UNEXP_SEEN */
142 		     + nla_total_size(sizeof(u32))
143 		     /* IFLA_BRIDGE_CFM_MEP_STATUS_VERSION_UNEXP_SEEN */
144 		     + nla_total_size(sizeof(u32))
145 		     /* IFLA_BRIDGE_CFM_MEP_STATUS_RX_LEVEL_LOW_SEEN */
146 		     + nla_total_size(sizeof(u32)));
147 	/* MEP instance (u32) + br_cfm_cc_peer_status */
148 	vinfo_sz += num_cfm_peer_mep_infos *
149 		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_INSTANCE */
150 		    (nla_total_size(sizeof(u32))
151 		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_PEER_MEPID */
152 		     + nla_total_size(sizeof(u32))
153 		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_CCM_DEFECT */
154 		     + nla_total_size(sizeof(u32))
155 		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_RDI */
156 		     + nla_total_size(sizeof(u32))
157 		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_PORT_TLV_VALUE */
158 		     + nla_total_size(sizeof(u8))
159 		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_IF_TLV_VALUE */
160 		     + nla_total_size(sizeof(u8))
161 		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEEN */
162 		     + nla_total_size(sizeof(u32))
163 		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_TLV_SEEN */
164 		     + nla_total_size(sizeof(u32))
165 		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEQ_UNEXP_SEEN */
166 		     + nla_total_size(sizeof(u32)));
167 
168 	return vinfo_sz;
169 }
170 
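/* Worst-case netlink size of the IFLA_PROTINFO nest for one port; keep
 * this in sync with the attributes emitted by br_port_fill_attrs().
 */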
171 static inline size_t br_port_info_size(void)
172 {
173 	return nla_total_size(1)	/* IFLA_BRPORT_STATE  */
174 		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
175 		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
176 		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
177 		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
178 		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
179 		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
180 		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
181 		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
182 		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
183 		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
184 		+ nla_total_size(1)	/* IFLA_BRPORT_BCAST_FLOOD */
185 		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
186 		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
187 		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
188 		+ nla_total_size(1)	/* IFLA_BRPORT_NEIGH_SUPPRESS */
189 		+ nla_total_size(1)	/* IFLA_BRPORT_ISOLATED */
190 		+ nla_total_size(1)	/* IFLA_BRPORT_LOCKED */
191 		+ nla_total_size(1)	/* IFLA_BRPORT_MAB */
192 		+ nla_total_size(1)	/* IFLA_BRPORT_NEIGH_VLAN_SUPPRESS */
193 		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
194 		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
195 		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
196 		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
197 		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
198 		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
199 		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
200 		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
201 		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
202 		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
203 		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
204 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
205 		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
206 		+ nla_total_size(sizeof(u32))	/* IFLA_BRPORT_MCAST_N_GROUPS */
207 		+ nla_total_size(sizeof(u32))	/* IFLA_BRPORT_MCAST_MAX_GROUPS */
208 #endif
209 		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_GROUP_FWD_MASK */
210 		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MRP_RING_OPEN */
211 		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MRP_IN_OPEN */
212 		+ nla_total_size(sizeof(u32))	/* IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT */
213 		+ nla_total_size(sizeof(u32))	/* IFLA_BRPORT_MCAST_EHT_HOSTS_CNT */
214 		+ 0;
215 }
216 
217 static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
218 {
219 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
220 		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
221 		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
222 		+ nla_total_size(4) /* IFLA_MASTER */
223 		+ nla_total_size(4) /* IFLA_MTU */
224 		+ nla_total_size(4) /* IFLA_LINK */
225 		+ nla_total_size(1) /* IFLA_OPERSTATE */
226 		+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
227 		+ nla_total_size(br_get_link_af_size_filtered(dev,
228 				 filter_mask)) /* IFLA_AF_SPEC */
229 		+ nla_total_size(4); /* IFLA_BRPORT_BACKUP_PORT */
230 }
231 
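/* Emit the IFLA_BRPORT_* attributes describing one port: STP state,
 * port flags, timers and (with CONFIG_BRIDGE_IGMP_SNOOPING) multicast
 * settings.  Returns -EMSGSIZE if the skb runs out of room.
 */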
232 static int br_port_fill_attrs(struct sk_buff *skb,
233 			      const struct net_bridge_port *p)
234 {
235 	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
236 	struct net_bridge_port *backup_p;
237 	u64 timerval;
238 
239 	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
240 	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
241 	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
242 	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
243 	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
244 	    nla_put_u8(skb, IFLA_BRPORT_PROTECT,
245 		       !!(p->flags & BR_ROOT_BLOCK)) ||
246 	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
247 		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
248 	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
249 		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
250 	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
251 	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
252 		       !!(p->flags & BR_FLOOD)) ||
253 	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
254 		       !!(p->flags & BR_MCAST_FLOOD)) ||
255 	    nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
256 		       !!(p->flags & BR_BCAST_FLOOD)) ||
257 	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
258 	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
259 		       !!(p->flags & BR_PROXYARP_WIFI)) ||
260 	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
261 		    &p->designated_root) ||
262 	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
263 		    &p->designated_bridge) ||
264 	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
265 	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
266 	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
267 	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
268 	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
269 		       p->topology_change_ack) ||
270 	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
271 	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
272 							BR_VLAN_TUNNEL)) ||
273 	    nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
274 	    nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
275 		       !!(p->flags & BR_NEIGH_SUPPRESS)) ||
276 	    nla_put_u8(skb, IFLA_BRPORT_MRP_RING_OPEN, !!(p->flags &
277 							  BR_MRP_LOST_CONT)) ||
278 	    nla_put_u8(skb, IFLA_BRPORT_MRP_IN_OPEN,
279 		       !!(p->flags & BR_MRP_LOST_IN_CONT)) ||
280 	    nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)) ||
281 	    nla_put_u8(skb, IFLA_BRPORT_LOCKED, !!(p->flags & BR_PORT_LOCKED)) ||
282 	    nla_put_u8(skb, IFLA_BRPORT_MAB, !!(p->flags & BR_PORT_MAB)) ||
283 	    nla_put_u8(skb, IFLA_BRPORT_NEIGH_VLAN_SUPPRESS,
284 		       !!(p->flags & BR_NEIGH_VLAN_SUPPRESS)))
285 		return -EMSGSIZE;
286 
287 	timerval = br_timer_value(&p->message_age_timer);
288 	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
289 			      IFLA_BRPORT_PAD))
290 		return -EMSGSIZE;
291 	timerval = br_timer_value(&p->forward_delay_timer);
292 	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
293 			      IFLA_BRPORT_PAD))
294 		return -EMSGSIZE;
295 	timerval = br_timer_value(&p->hold_timer);
296 	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
297 			      IFLA_BRPORT_PAD))
298 		return -EMSGSIZE;
299 
300 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
301 	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
302 		       p->multicast_ctx.multicast_router) ||
303 	    nla_put_u32(skb, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
304 			p->multicast_eht_hosts_limit) ||
305 	    nla_put_u32(skb, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
306 			p->multicast_eht_hosts_cnt) ||
307 	    nla_put_u32(skb, IFLA_BRPORT_MCAST_N_GROUPS,
308 			br_multicast_ngroups_get(&p->multicast_ctx)) ||
309 	    nla_put_u32(skb, IFLA_BRPORT_MCAST_MAX_GROUPS,
310 			br_multicast_ngroups_get_max(&p->multicast_ctx)))
311 		return -EMSGSIZE;
312 #endif
313 
314 	/* we might be called with only br->lock held */
315 	rcu_read_lock();
316 	backup_p = rcu_dereference(p->backup_port);
317 	if (backup_p)
318 		nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT,
319 			    backup_p->dev->ifindex);
320 	rcu_read_unlock();
321 
322 	return 0;
323 }
324 
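/* Emit one VLAN or one VLAN range as IFLA_BRIDGE_VLAN_INFO attributes.
 * A range becomes two entries, e.g. VIDs 10-20 with the same flags are
 * encoded as {vid 10, flags | RANGE_BEGIN} followed by
 * {vid 20, flags | RANGE_END}; a single VID is emitted as one entry.
 */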
325 static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
326 				    u16 vid_end, u16 flags)
327 {
328 	struct  bridge_vlan_info vinfo;
329 
330 	if ((vid_end - vid_start) > 0) {
331 		/* add range to skb */
332 		vinfo.vid = vid_start;
333 		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
334 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
335 			    sizeof(vinfo), &vinfo))
336 			goto nla_put_failure;
337 
338 		vinfo.vid = vid_end;
339 		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
340 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
341 			    sizeof(vinfo), &vinfo))
342 			goto nla_put_failure;
343 	} else {
344 		vinfo.vid = vid_start;
345 		vinfo.flags = flags;
346 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
347 			    sizeof(vinfo), &vinfo))
348 			goto nla_put_failure;
349 	}
350 
351 	return 0;
352 
353 nla_put_failure:
354 	return -EMSGSIZE;
355 }
356 
357 static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
358 					 struct net_bridge_vlan_group *vg)
359 {
360 	struct net_bridge_vlan *v;
361 	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
362 	u16 flags, pvid;
363 	int err = 0;
364 
365 	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
366 	 * and mark vlan info with begin and end flags
367 	 * if vlaninfo represents a range
368 	 */
369 	pvid = br_get_pvid(vg);
370 	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
371 		flags = 0;
372 		if (!br_vlan_should_use(v))
373 			continue;
374 		if (v->vid == pvid)
375 			flags |= BRIDGE_VLAN_INFO_PVID;
376 
377 		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
378 			flags |= BRIDGE_VLAN_INFO_UNTAGGED;
379 
380 		if (vid_range_start == 0) {
381 			goto initvars;
382 		} else if ((v->vid - vid_range_end) == 1 &&
383 			flags == vid_range_flags) {
384 			vid_range_end = v->vid;
385 			continue;
386 		} else {
387 			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
388 						       vid_range_end,
389 						       vid_range_flags);
390 			if (err)
391 				return err;
392 		}
393 
394 initvars:
395 		vid_range_start = v->vid;
396 		vid_range_end = v->vid;
397 		vid_range_flags = flags;
398 	}
399 
400 	if (vid_range_start != 0) {
401 		/* Call it once more to send any leftover vlans */
402 		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
403 					       vid_range_end,
404 					       vid_range_flags);
405 		if (err)
406 			return err;
407 	}
408 
409 	return 0;
410 }
411 
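/* Uncompressed dump (RTEXT_FILTER_BRVLAN): one IFLA_BRIDGE_VLAN_INFO
 * entry per VLAN, with PVID/UNTAGGED reflected in each entry's flags.
 */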
412 static int br_fill_ifvlaninfo(struct sk_buff *skb,
413 			      struct net_bridge_vlan_group *vg)
414 {
415 	struct bridge_vlan_info vinfo;
416 	struct net_bridge_vlan *v;
417 	u16 pvid;
418 
419 	pvid = br_get_pvid(vg);
420 	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
421 		if (!br_vlan_should_use(v))
422 			continue;
423 
424 		vinfo.vid = v->vid;
425 		vinfo.flags = 0;
426 		if (v->vid == pvid)
427 			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
428 
429 		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
430 			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
431 
432 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
433 			    sizeof(vinfo), &vinfo))
434 			goto nla_put_failure;
435 	}
436 
437 	return 0;
438 
439 nla_put_failure:
440 	return -EMSGSIZE;
441 }
442 
443 /*
444  * Create one netlink message for one interface
445  * Contains port and master info as well as carrier and bridge state.
446  */
447 static int br_fill_ifinfo(struct sk_buff *skb,
448 			  const struct net_bridge_port *port,
449 			  u32 pid, u32 seq, int event, unsigned int flags,
450 			  u32 filter_mask, const struct net_device *dev,
451 			  bool getlink)
452 {
453 	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
454 	struct nlattr *af = NULL;
455 	struct net_bridge *br;
456 	struct ifinfomsg *hdr;
457 	struct nlmsghdr *nlh;
458 
459 	if (port)
460 		br = port->br;
461 	else
462 		br = netdev_priv(dev);
463 
464 	br_debug(br, "br_fill_info event %d port %s master %s\n",
465 		     event, dev->name, br->dev->name);
466 
467 	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
468 	if (nlh == NULL)
469 		return -EMSGSIZE;
470 
471 	hdr = nlmsg_data(nlh);
472 	hdr->ifi_family = AF_BRIDGE;
473 	hdr->__ifi_pad = 0;
474 	hdr->ifi_type = dev->type;
475 	hdr->ifi_index = dev->ifindex;
476 	hdr->ifi_flags = dev_get_flags(dev);
477 	hdr->ifi_change = 0;
478 
479 	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
480 	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
481 	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
482 	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
483 	    (dev->addr_len &&
484 	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
485 	    (dev->ifindex != dev_get_iflink(dev) &&
486 	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
487 		goto nla_put_failure;
488 
489 	if (event == RTM_NEWLINK && port) {
490 		struct nlattr *nest;
491 
492 		nest = nla_nest_start(skb, IFLA_PROTINFO);
493 		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
494 			goto nla_put_failure;
495 		nla_nest_end(skb, nest);
496 	}
497 
498 	if (filter_mask & (RTEXT_FILTER_BRVLAN |
499 			   RTEXT_FILTER_BRVLAN_COMPRESSED |
500 			   RTEXT_FILTER_MRP |
501 			   RTEXT_FILTER_CFM_CONFIG |
502 			   RTEXT_FILTER_CFM_STATUS |
503 			   RTEXT_FILTER_MST)) {
504 		af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
505 		if (!af)
506 			goto nla_put_failure;
507 	}
508 
509 	/* Check if the VID information is requested */
510 	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
511 	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
512 		struct net_bridge_vlan_group *vg;
513 		int err;
514 
515 		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
516 		rcu_read_lock();
517 		if (port)
518 			vg = nbp_vlan_group_rcu(port);
519 		else
520 			vg = br_vlan_group_rcu(br);
521 
522 		if (!vg || !vg->num_vlans) {
523 			rcu_read_unlock();
524 			goto done;
525 		}
526 		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
527 			err = br_fill_ifvlaninfo_compressed(skb, vg);
528 		else
529 			err = br_fill_ifvlaninfo(skb, vg);
530 
531 		if (port && (port->flags & BR_VLAN_TUNNEL))
532 			err = br_fill_vlan_tunnel_info(skb, vg);
533 		rcu_read_unlock();
534 		if (err)
535 			goto nla_put_failure;
536 	}
537 
538 	if (filter_mask & RTEXT_FILTER_MRP) {
539 		int err;
540 
541 		if (!br_mrp_enabled(br) || port)
542 			goto done;
543 
544 		rcu_read_lock();
545 		err = br_mrp_fill_info(skb, br);
546 		rcu_read_unlock();
547 
548 		if (err)
549 			goto nla_put_failure;
550 	}
551 
552 	if (filter_mask & (RTEXT_FILTER_CFM_CONFIG | RTEXT_FILTER_CFM_STATUS)) {
553 		struct nlattr *cfm_nest = NULL;
554 		int err;
555 
556 		if (!br_cfm_created(br) || port)
557 			goto done;
558 
559 		cfm_nest = nla_nest_start(skb, IFLA_BRIDGE_CFM);
560 		if (!cfm_nest)
561 			goto nla_put_failure;
562 
563 		if (filter_mask & RTEXT_FILTER_CFM_CONFIG) {
564 			rcu_read_lock();
565 			err = br_cfm_config_fill_info(skb, br);
566 			rcu_read_unlock();
567 			if (err)
568 				goto nla_put_failure;
569 		}
570 
571 		if (filter_mask & RTEXT_FILTER_CFM_STATUS) {
572 			rcu_read_lock();
573 			err = br_cfm_status_fill_info(skb, br, getlink);
574 			rcu_read_unlock();
575 			if (err)
576 				goto nla_put_failure;
577 		}
578 
579 		nla_nest_end(skb, cfm_nest);
580 	}
581 
582 	if ((filter_mask & RTEXT_FILTER_MST) &&
583 	    br_opt_get(br, BROPT_MST_ENABLED) && port) {
584 		const struct net_bridge_vlan_group *vg = nbp_vlan_group(port);
585 		struct nlattr *mst_nest;
586 		int err;
587 
588 		if (!vg || !vg->num_vlans)
589 			goto done;
590 
591 		mst_nest = nla_nest_start(skb, IFLA_BRIDGE_MST);
592 		if (!mst_nest)
593 			goto nla_put_failure;
594 
595 		err = br_mst_fill_info(skb, vg);
596 		if (err)
597 			goto nla_put_failure;
598 
599 		nla_nest_end(skb, mst_nest);
600 	}
601 
602 done:
603 	if (af) {
604 		if (nlmsg_get_pos(skb) - (void *)af > nla_attr_size(0))
605 			nla_nest_end(skb, af);
606 		else
607 			nla_nest_cancel(skb, af);
608 	}
609 
610 	nlmsg_end(skb, nlh);
611 	return 0;
612 
613 nla_put_failure:
614 	nlmsg_cancel(skb, nlh);
615 	return -EMSGSIZE;
616 }
617 
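/* Build and send an RTNLGRP_LINK notification for a bridge or one of
 * its ports, sized via br_nlmsg_size() and limited by @filter.
 */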
618 void br_info_notify(int event, const struct net_bridge *br,
619 		    const struct net_bridge_port *port, u32 filter)
620 {
621 	struct net_device *dev;
622 	struct sk_buff *skb;
623 	int err = -ENOBUFS;
624 	struct net *net;
625 	u16 port_no = 0;
626 
627 	if (WARN_ON(!port && !br))
628 		return;
629 
630 	if (port) {
631 		dev = port->dev;
632 		br = port->br;
633 		port_no = port->port_no;
634 	} else {
635 		dev = br->dev;
636 	}
637 
638 	net = dev_net(dev);
639 	br_debug(br, "port %u(%s) event %d\n", port_no, dev->name, event);
640 
641 	skb = nlmsg_new(br_nlmsg_size(dev, filter), GFP_ATOMIC);
642 	if (skb == NULL)
643 		goto errout;
644 
645 	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev, false);
646 	if (err < 0) {
647 		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
648 		WARN_ON(err == -EMSGSIZE);
649 		kfree_skb(skb);
650 		goto errout;
651 	}
652 	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
653 	return;
654 errout:
655 	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
656 }
657 
658 /* Notify listeners of a change in bridge or port information */
659 void br_ifinfo_notify(int event, const struct net_bridge *br,
660 		      const struct net_bridge_port *port)
661 {
662 	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
663 
664 	return br_info_notify(event, br, port, filter);
665 }
666 
667 /*
668  * Dump information about all ports, in response to GETLINK
669  */
670 int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
671 	       struct net_device *dev, u32 filter_mask, int nlflags)
672 {
673 	struct net_bridge_port *port = br_port_get_rtnl(dev);
674 
675 	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
676 	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) &&
677 	    !(filter_mask & RTEXT_FILTER_MRP) &&
678 	    !(filter_mask & RTEXT_FILTER_CFM_CONFIG) &&
679 	    !(filter_mask & RTEXT_FILTER_CFM_STATUS))
680 		return 0;
681 
682 	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
683 			      filter_mask, dev, true);
684 }
685 
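/* Apply a single bridge_vlan_info entry: RTM_SETLINK adds the VLAN on
 * the port (or, with BRENTRY set, on the bridge itself), RTM_DELLINK
 * removes it.  *changed is set when the configuration actually changed.
 */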
686 static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
687 			int cmd, struct bridge_vlan_info *vinfo, bool *changed,
688 			struct netlink_ext_ack *extack)
689 {
690 	bool curr_change;
691 	int err = 0;
692 
693 	switch (cmd) {
694 	case RTM_SETLINK:
695 		if (p) {
696 			/* if the MASTER flag is set this will act on the global
697 			 * per-VLAN entry as well
698 			 */
699 			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
700 					   &curr_change, extack);
701 		} else {
702 			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
703 			err = br_vlan_add(br, vinfo->vid, vinfo->flags,
704 					  &curr_change, extack);
705 		}
706 		if (curr_change)
707 			*changed = true;
708 		break;
709 
710 	case RTM_DELLINK:
711 		if (p) {
712 			if (!nbp_vlan_delete(p, vinfo->vid))
713 				*changed = true;
714 
715 			if ((vinfo->flags & BRIDGE_VLAN_INFO_MASTER) &&
716 			    !br_vlan_delete(p->br, vinfo->vid))
717 				*changed = true;
718 		} else if (!br_vlan_delete(br, vinfo->vid)) {
719 			*changed = true;
720 		}
721 		break;
722 	}
723 
724 	return err;
725 }
726 
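/* Handle one IFLA_BRIDGE_VLAN_INFO attribute.  A RANGE_BEGIN entry is
 * only remembered in *vinfo_last; the following RANGE_END entry expands
 * the range one VID at a time and sends RTM_NEWVLAN/RTM_DELVLAN
 * notifications for each contiguous run that actually changed.
 */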
727 int br_process_vlan_info(struct net_bridge *br,
728 			 struct net_bridge_port *p, int cmd,
729 			 struct bridge_vlan_info *vinfo_curr,
730 			 struct bridge_vlan_info **vinfo_last,
731 			 bool *changed,
732 			 struct netlink_ext_ack *extack)
733 {
734 	int err, rtm_cmd;
735 
736 	if (!br_vlan_valid_id(vinfo_curr->vid, extack))
737 		return -EINVAL;
738 
739 	/* needed for vlan-only NEWVLAN/DELVLAN notifications */
740 	rtm_cmd = br_afspec_cmd_to_rtm(cmd);
741 
742 	if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
743 		if (!br_vlan_valid_range(vinfo_curr, *vinfo_last, extack))
744 			return -EINVAL;
745 		*vinfo_last = vinfo_curr;
746 		return 0;
747 	}
748 
749 	if (*vinfo_last) {
750 		struct bridge_vlan_info tmp_vinfo;
751 		int v, v_change_start = 0;
752 
753 		if (!br_vlan_valid_range(vinfo_curr, *vinfo_last, extack))
754 			return -EINVAL;
755 
756 		memcpy(&tmp_vinfo, *vinfo_last,
757 		       sizeof(struct bridge_vlan_info));
758 		for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
759 			bool curr_change = false;
760 
761 			tmp_vinfo.vid = v;
762 			err = br_vlan_info(br, p, cmd, &tmp_vinfo, &curr_change,
763 					   extack);
764 			if (err)
765 				break;
766 			if (curr_change) {
767 				*changed = curr_change;
768 				if (!v_change_start)
769 					v_change_start = v;
770 			} else {
771 				/* nothing to notify yet */
772 				if (!v_change_start)
773 					continue;
774 				br_vlan_notify(br, p, v_change_start,
775 					       v - 1, rtm_cmd);
776 				v_change_start = 0;
777 			}
778 			cond_resched();
779 		}
780 		/* v_change_start is set only if the last/whole range changed */
781 		if (v_change_start)
782 			br_vlan_notify(br, p, v_change_start,
783 				       v - 1, rtm_cmd);
784 
785 		*vinfo_last = NULL;
786 
787 		return err;
788 	}
789 
790 	err = br_vlan_info(br, p, cmd, vinfo_curr, changed, extack);
791 	if (*changed)
792 		br_vlan_notify(br, p, vinfo_curr->vid, 0, rtm_cmd);
793 
794 	return err;
795 }
796 
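/* Walk the nested IFLA_AF_SPEC attributes of an RTM_SETLINK/RTM_DELLINK
 * request (typically generated by iproute2, e.g. "bridge vlan add dev
 * <port> vid <vid>") and dispatch the VLAN, VLAN tunnel, MRP, CFM and
 * MST sub-messages to their handlers.
 */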
797 static int br_afspec(struct net_bridge *br,
798 		     struct net_bridge_port *p,
799 		     struct nlattr *af_spec,
800 		     int cmd, bool *changed,
801 		     struct netlink_ext_ack *extack)
802 {
803 	struct bridge_vlan_info *vinfo_curr = NULL;
804 	struct bridge_vlan_info *vinfo_last = NULL;
805 	struct nlattr *attr;
806 	struct vtunnel_info tinfo_last = {};
807 	struct vtunnel_info tinfo_curr = {};
808 	int err = 0, rem;
809 
810 	nla_for_each_nested(attr, af_spec, rem) {
811 		err = 0;
812 		switch (nla_type(attr)) {
813 		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
814 			if (!p || !(p->flags & BR_VLAN_TUNNEL))
815 				return -EINVAL;
816 			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
817 			if (err)
818 				return err;
819 			err = br_process_vlan_tunnel_info(br, p, cmd,
820 							  &tinfo_curr,
821 							  &tinfo_last,
822 							  changed);
823 			if (err)
824 				return err;
825 			break;
826 		case IFLA_BRIDGE_VLAN_INFO:
827 			if (nla_len(attr) != sizeof(struct bridge_vlan_info))
828 				return -EINVAL;
829 			vinfo_curr = nla_data(attr);
830 			err = br_process_vlan_info(br, p, cmd, vinfo_curr,
831 						   &vinfo_last, changed,
832 						   extack);
833 			if (err)
834 				return err;
835 			break;
836 		case IFLA_BRIDGE_MRP:
837 			err = br_mrp_parse(br, p, attr, cmd, extack);
838 			if (err)
839 				return err;
840 			break;
841 		case IFLA_BRIDGE_CFM:
842 			err = br_cfm_parse(br, p, attr, cmd, extack);
843 			if (err)
844 				return err;
845 			break;
846 		case IFLA_BRIDGE_MST:
847 			if (!p) {
848 				NL_SET_ERR_MSG(extack,
849 					       "MST states can only be set on bridge ports");
850 				return -EINVAL;
851 			}
852 
853 			if (cmd != RTM_SETLINK) {
854 				NL_SET_ERR_MSG(extack,
855 					       "MST states can only be set through RTM_SETLINK");
856 				return -EINVAL;
857 			}
858 
859 			err = br_mst_process(p, attr, extack);
860 			if (err)
861 				return err;
862 			break;
863 		}
864 	}
865 
866 	return err;
867 }
868 
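/* Netlink policy for IFLA_BRPORT_* attributes; strict validation only
 * applies to attributes newer than IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT.
 */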
869 static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
870 	[IFLA_BRPORT_UNSPEC]	= { .strict_start_type =
871 					IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT + 1 },
872 	[IFLA_BRPORT_STATE]	= { .type = NLA_U8 },
873 	[IFLA_BRPORT_COST]	= { .type = NLA_U32 },
874 	[IFLA_BRPORT_PRIORITY]	= { .type = NLA_U16 },
875 	[IFLA_BRPORT_MODE]	= { .type = NLA_U8 },
876 	[IFLA_BRPORT_GUARD]	= { .type = NLA_U8 },
877 	[IFLA_BRPORT_PROTECT]	= { .type = NLA_U8 },
878 	[IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
879 	[IFLA_BRPORT_LEARNING]	= { .type = NLA_U8 },
880 	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
881 	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
882 	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
883 	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
884 	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
885 	[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
886 	[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
887 	[IFLA_BRPORT_VLAN_TUNNEL] = { .type = NLA_U8 },
888 	[IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
889 	[IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
890 	[IFLA_BRPORT_ISOLATED]	= { .type = NLA_U8 },
891 	[IFLA_BRPORT_LOCKED] = { .type = NLA_U8 },
892 	[IFLA_BRPORT_MAB] = { .type = NLA_U8 },
893 	[IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 },
894 	[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT] = { .type = NLA_U32 },
895 	[IFLA_BRPORT_MCAST_N_GROUPS] = { .type = NLA_REJECT },
896 	[IFLA_BRPORT_MCAST_MAX_GROUPS] = { .type = NLA_U32 },
897 	[IFLA_BRPORT_NEIGH_VLAN_SUPPRESS] = NLA_POLICY_MAX(NLA_U8, 1),
898 };
899 
900 /* Change the state of the port and notify spanning tree */
901 static int br_set_port_state(struct net_bridge_port *p, u8 state)
902 {
903 	if (state > BR_STATE_BLOCKING)
904 		return -EINVAL;
905 
906 	/* if kernel STP is running, don't allow changes */
907 	if (p->br->stp_enabled == BR_KERNEL_STP)
908 		return -EBUSY;
909 
910 	/* if device is not up, change is not allowed
911 	 * if link is not present, only allowable state is disabled
912 	 */
913 	if (!netif_running(p->dev) ||
914 	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
915 		return -ENETDOWN;
916 
917 	br_set_state(p, state);
918 	br_port_state_selection(p->br);
919 	return 0;
920 }
921 
922 /* Set or clear port flags based on attribute */
923 static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
924 			     int attrtype, unsigned long mask)
925 {
926 	if (!tb[attrtype])
927 		return;
928 
929 	if (nla_get_u8(tb[attrtype]))
930 		p->flags |= mask;
931 	else
932 		p->flags &= ~mask;
933 }
934 
935 /* Process bridge protocol info on port */
936 static int br_setport(struct net_bridge_port *p, struct nlattr *tb[],
937 		      struct netlink_ext_ack *extack)
938 {
939 	unsigned long old_flags, changed_mask;
940 	bool br_vlan_tunnel_old;
941 	int err;
942 
943 	old_flags = p->flags;
944 	br_vlan_tunnel_old = (old_flags & BR_VLAN_TUNNEL) ? true : false;
945 
946 	br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
947 	br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
948 	br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE,
949 			 BR_MULTICAST_FAST_LEAVE);
950 	br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
951 	br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
952 	br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
953 	br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
954 	br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST,
955 			 BR_MULTICAST_TO_UNICAST);
956 	br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
957 	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
958 	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
959 	br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
960 	br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS, BR_NEIGH_SUPPRESS);
961 	br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
962 	br_set_port_flag(p, tb, IFLA_BRPORT_LOCKED, BR_PORT_LOCKED);
963 	br_set_port_flag(p, tb, IFLA_BRPORT_MAB, BR_PORT_MAB);
964 	br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_VLAN_SUPPRESS,
965 			 BR_NEIGH_VLAN_SUPPRESS);
966 
967 	if ((p->flags & BR_PORT_MAB) &&
968 	    (!(p->flags & BR_PORT_LOCKED) || !(p->flags & BR_LEARNING))) {
969 		NL_SET_ERR_MSG(extack, "Bridge port must be locked and have learning enabled when MAB is enabled");
970 		p->flags = old_flags;
971 		return -EINVAL;
972 	} else if (!(p->flags & BR_PORT_MAB) && (old_flags & BR_PORT_MAB)) {
973 		struct net_bridge_fdb_flush_desc desc = {
974 			.flags = BIT(BR_FDB_LOCKED),
975 			.flags_mask = BIT(BR_FDB_LOCKED),
976 			.port_ifindex = p->dev->ifindex,
977 		};
978 
979 		br_fdb_flush(p->br, &desc);
980 	}
981 
982 	changed_mask = old_flags ^ p->flags;
983 
984 	err = br_switchdev_set_port_flag(p, p->flags, changed_mask, extack);
985 	if (err) {
986 		p->flags = old_flags;
987 		return err;
988 	}
989 
990 	if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
991 		nbp_vlan_tunnel_info_flush(p);
992 
993 	br_port_flags_change(p, changed_mask);
994 
995 	if (tb[IFLA_BRPORT_COST]) {
996 		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
997 		if (err)
998 			return err;
999 	}
1000 
1001 	if (tb[IFLA_BRPORT_PRIORITY]) {
1002 		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
1003 		if (err)
1004 			return err;
1005 	}
1006 
1007 	if (tb[IFLA_BRPORT_STATE]) {
1008 		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
1009 		if (err)
1010 			return err;
1011 	}
1012 
1013 	if (tb[IFLA_BRPORT_FLUSH])
1014 		br_fdb_delete_by_port(p->br, p, 0, 0);
1015 
1016 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1017 	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
1018 		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);
1019 
1020 		err = br_multicast_set_port_router(&p->multicast_ctx,
1021 						   mcast_router);
1022 		if (err)
1023 			return err;
1024 	}
1025 
1026 	if (tb[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT]) {
1027 		u32 hlimit;
1028 
1029 		hlimit = nla_get_u32(tb[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT]);
1030 		err = br_multicast_eht_set_hosts_limit(p, hlimit);
1031 		if (err)
1032 			return err;
1033 	}
1034 
1035 	if (tb[IFLA_BRPORT_MCAST_MAX_GROUPS]) {
1036 		u32 max_groups;
1037 
1038 		max_groups = nla_get_u32(tb[IFLA_BRPORT_MCAST_MAX_GROUPS]);
1039 		br_multicast_ngroups_set_max(&p->multicast_ctx, max_groups);
1040 	}
1041 #endif
1042 
1043 	if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
1044 		u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);
1045 
1046 		if (fwd_mask & BR_GROUPFWD_MACPAUSE)
1047 			return -EINVAL;
1048 		p->group_fwd_mask = fwd_mask;
1049 	}
1050 
1051 	if (tb[IFLA_BRPORT_BACKUP_PORT]) {
1052 		struct net_device *backup_dev = NULL;
1053 		u32 backup_ifindex;
1054 
1055 		backup_ifindex = nla_get_u32(tb[IFLA_BRPORT_BACKUP_PORT]);
1056 		if (backup_ifindex) {
1057 			backup_dev = __dev_get_by_index(dev_net(p->dev),
1058 							backup_ifindex);
1059 			if (!backup_dev)
1060 				return -ENOENT;
1061 		}
1062 
1063 		err = nbp_backup_change(p, backup_dev);
1064 		if (err)
1065 			return err;
1066 	}
1067 
1068 	return 0;
1069 }
1070 
1071 /* Change state and parameters on port. */
1072 int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags,
1073 	       struct netlink_ext_ack *extack)
1074 {
1075 	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
1076 	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
1077 	struct net_bridge_port *p;
1078 	struct nlattr *protinfo;
1079 	struct nlattr *afspec;
1080 	bool changed = false;
1081 	int err = 0;
1082 
1083 	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
1084 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
1085 	if (!protinfo && !afspec)
1086 		return 0;
1087 
1088 	p = br_port_get_rtnl(dev);
1089 	/* Accept dev as the bridge itself when AF_SPEC is present, since
1090 	 * someone may be setting VLAN info on the bridge device itself
1091 	 */
1092 	if (!p && !afspec)
1093 		return -EINVAL;
1094 
1095 	if (p && protinfo) {
1096 		if (protinfo->nla_type & NLA_F_NESTED) {
1097 			err = nla_parse_nested_deprecated(tb, IFLA_BRPORT_MAX,
1098 							  protinfo,
1099 							  br_port_policy,
1100 							  NULL);
1101 			if (err)
1102 				return err;
1103 
1104 			spin_lock_bh(&p->br->lock);
1105 			err = br_setport(p, tb, extack);
1106 			spin_unlock_bh(&p->br->lock);
1107 		} else {
1108 			/* Binary compatibility with old RSTP */
1109 			if (nla_len(protinfo) < sizeof(u8))
1110 				return -EINVAL;
1111 
1112 			spin_lock_bh(&p->br->lock);
1113 			err = br_set_port_state(p, nla_get_u8(protinfo));
1114 			spin_unlock_bh(&p->br->lock);
1115 		}
1116 		if (err)
1117 			goto out;
1118 		changed = true;
1119 	}
1120 
1121 	if (afspec)
1122 		err = br_afspec(br, p, afspec, RTM_SETLINK, &changed, extack);
1123 
1124 	if (changed)
1125 		br_ifinfo_notify(RTM_NEWLINK, br, p);
1126 out:
1127 	return err;
1128 }
1129 
1130 /* Delete port information */
1131 int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
1132 {
1133 	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
1134 	struct net_bridge_port *p;
1135 	struct nlattr *afspec;
1136 	bool changed = false;
1137 	int err = 0;
1138 
1139 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
1140 	if (!afspec)
1141 		return 0;
1142 
1143 	p = br_port_get_rtnl(dev);
1144 	/* We want to accept dev as bridge itself as well */
1145 	if (!p && !netif_is_bridge_master(dev))
1146 		return -EINVAL;
1147 
1148 	err = br_afspec(br, p, afspec, RTM_DELLINK, &changed, NULL);
1149 	if (changed)
1150 		/* Send RTM_NEWLINK because userspace
1151 		 * expects RTM_NEWLINK for vlan dels
1152 		 */
1153 		br_ifinfo_notify(RTM_NEWLINK, br, p);
1154 
1155 	return err;
1156 }
1157 
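/* Validate RTM_NEWLINK attributes before creating or changing a bridge:
 * IFLA_ADDRESS must be a valid Ethernet unicast address and, with VLAN
 * filtering built in, the VLAN protocol and default PVID must be sane.
 */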
1158 static int br_validate(struct nlattr *tb[], struct nlattr *data[],
1159 		       struct netlink_ext_ack *extack)
1160 {
1161 	if (tb[IFLA_ADDRESS]) {
1162 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1163 			return -EINVAL;
1164 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1165 			return -EADDRNOTAVAIL;
1166 	}
1167 
1168 	if (!data)
1169 		return 0;
1170 
1171 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
1172 	if (data[IFLA_BR_VLAN_PROTOCOL] &&
1173 	    !eth_type_vlan(nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])))
1174 		return -EPROTONOSUPPORT;
1175 
1176 	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
1177 		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
1178 
1179 		if (defpvid >= VLAN_VID_MASK)
1180 			return -EINVAL;
1181 	}
1182 #endif
1183 
1184 	return 0;
1185 }
1186 
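/* Changelink handler for an enslaved port, reached via IFLA_INFO_SLAVE_DATA
 * (e.g. "ip link set dev <port> type bridge_slave learning on"); reuses
 * br_setport() under the bridge lock.
 */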
1187 static int br_port_slave_changelink(struct net_device *brdev,
1188 				    struct net_device *dev,
1189 				    struct nlattr *tb[],
1190 				    struct nlattr *data[],
1191 				    struct netlink_ext_ack *extack)
1192 {
1193 	struct net_bridge *br = netdev_priv(brdev);
1194 	int ret;
1195 
1196 	if (!data)
1197 		return 0;
1198 
1199 	spin_lock_bh(&br->lock);
1200 	ret = br_setport(br_port_get_rtnl(dev), data, extack);
1201 	spin_unlock_bh(&br->lock);
1202 
1203 	return ret;
1204 }
1205 
1206 static int br_port_fill_slave_info(struct sk_buff *skb,
1207 				   const struct net_device *brdev,
1208 				   const struct net_device *dev)
1209 {
1210 	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
1211 }
1212 
1213 static size_t br_port_get_slave_size(const struct net_device *brdev,
1214 				     const struct net_device *dev)
1215 {
1216 	return br_port_info_size();
1217 }
1218 
1219 static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
1220 	[IFLA_BR_FORWARD_DELAY]	= { .type = NLA_U32 },
1221 	[IFLA_BR_HELLO_TIME]	= { .type = NLA_U32 },
1222 	[IFLA_BR_MAX_AGE]	= { .type = NLA_U32 },
1223 	[IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
1224 	[IFLA_BR_STP_STATE] = { .type = NLA_U32 },
1225 	[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
1226 	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
1227 	[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
1228 	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
1229 	[IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
1230 				 .len  = ETH_ALEN },
1231 	[IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
1232 	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
1233 	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
1234 	[IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
1235 	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
1236 	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
1237 	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
1238 	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
1239 	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
1240 	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
1241 	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
1242 	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
1243 	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
1244 	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
1245 	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
1246 	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
1247 	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
1248 	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
1249 	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
1250 	[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
1251 	[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
1252 	[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
1253 	[IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 },
1254 	[IFLA_BR_MULTI_BOOLOPT] =
1255 		NLA_POLICY_EXACT_LEN(sizeof(struct br_boolopt_multi)),
1256 };
1257 
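/* Apply IFLA_BR_* bridge options from a changelink request, e.g.
 * "ip link set dev br0 type bridge stp_state 1".  Options are applied
 * in order and the first failure aborts the remaining changes.
 */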
1258 static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
1259 			 struct nlattr *data[],
1260 			 struct netlink_ext_ack *extack)
1261 {
1262 	struct net_bridge *br = netdev_priv(brdev);
1263 	int err;
1264 
1265 	if (!data)
1266 		return 0;
1267 
1268 	if (data[IFLA_BR_FORWARD_DELAY]) {
1269 		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
1270 		if (err)
1271 			return err;
1272 	}
1273 
1274 	if (data[IFLA_BR_HELLO_TIME]) {
1275 		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
1276 		if (err)
1277 			return err;
1278 	}
1279 
1280 	if (data[IFLA_BR_MAX_AGE]) {
1281 		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
1282 		if (err)
1283 			return err;
1284 	}
1285 
1286 	if (data[IFLA_BR_AGEING_TIME]) {
1287 		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
1288 		if (err)
1289 			return err;
1290 	}
1291 
1292 	if (data[IFLA_BR_STP_STATE]) {
1293 		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);
1294 
1295 		err = br_stp_set_enabled(br, stp_enabled, extack);
1296 		if (err)
1297 			return err;
1298 	}
1299 
1300 	if (data[IFLA_BR_PRIORITY]) {
1301 		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);
1302 
1303 		br_stp_set_bridge_priority(br, priority);
1304 	}
1305 
1306 	if (data[IFLA_BR_VLAN_FILTERING]) {
1307 		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);
1308 
1309 		err = br_vlan_filter_toggle(br, vlan_filter, extack);
1310 		if (err)
1311 			return err;
1312 	}
1313 
1314 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
1315 	if (data[IFLA_BR_VLAN_PROTOCOL]) {
1316 		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);
1317 
1318 		err = __br_vlan_set_proto(br, vlan_proto, extack);
1319 		if (err)
1320 			return err;
1321 	}
1322 
1323 	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
1324 		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
1325 
1326 		err = __br_vlan_set_default_pvid(br, defpvid, extack);
1327 		if (err)
1328 			return err;
1329 	}
1330 
1331 	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
1332 		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);
1333 
1334 		err = br_vlan_set_stats(br, vlan_stats);
1335 		if (err)
1336 			return err;
1337 	}
1338 
1339 	if (data[IFLA_BR_VLAN_STATS_PER_PORT]) {
1340 		__u8 per_port = nla_get_u8(data[IFLA_BR_VLAN_STATS_PER_PORT]);
1341 
1342 		err = br_vlan_set_stats_per_port(br, per_port);
1343 		if (err)
1344 			return err;
1345 	}
1346 #endif
1347 
1348 	if (data[IFLA_BR_GROUP_FWD_MASK]) {
1349 		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);
1350 
1351 		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
1352 			return -EINVAL;
1353 		br->group_fwd_mask = fwd_mask;
1354 	}
1355 
1356 	if (data[IFLA_BR_GROUP_ADDR]) {
1357 		u8 new_addr[ETH_ALEN];
1358 
1359 		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
1360 			return -EINVAL;
1361 		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
1362 		if (!is_link_local_ether_addr(new_addr))
1363 			return -EINVAL;
1364 		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
1365 		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
1366 		    new_addr[5] == 3)		/* 802.1X PAE address */
1367 			return -EINVAL;
1368 		spin_lock_bh(&br->lock);
1369 		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
1370 		spin_unlock_bh(&br->lock);
1371 		br_opt_toggle(br, BROPT_GROUP_ADDR_SET, true);
1372 		br_recalculate_fwd_mask(br);
1373 	}
1374 
1375 	if (data[IFLA_BR_FDB_FLUSH]) {
1376 		struct net_bridge_fdb_flush_desc desc = {
1377 			.flags_mask = BIT(BR_FDB_STATIC)
1378 		};
1379 
1380 		br_fdb_flush(br, &desc);
1381 	}
1382 
1383 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1384 	if (data[IFLA_BR_MCAST_ROUTER]) {
1385 		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);
1386 
1387 		err = br_multicast_set_router(&br->multicast_ctx,
1388 					      multicast_router);
1389 		if (err)
1390 			return err;
1391 	}
1392 
1393 	if (data[IFLA_BR_MCAST_SNOOPING]) {
1394 		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
1395 
1396 		err = br_multicast_toggle(br, mcast_snooping, extack);
1397 		if (err)
1398 			return err;
1399 	}
1400 
1401 	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
1402 		u8 val;
1403 
1404 		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
1405 		br_opt_toggle(br, BROPT_MULTICAST_QUERY_USE_IFADDR, !!val);
1406 	}
1407 
1408 	if (data[IFLA_BR_MCAST_QUERIER]) {
1409 		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);
1410 
1411 		err = br_multicast_set_querier(&br->multicast_ctx,
1412 					       mcast_querier);
1413 		if (err)
1414 			return err;
1415 	}
1416 
1417 	if (data[IFLA_BR_MCAST_HASH_ELASTICITY])
1418 		br_warn(br, "the hash_elasticity option has been deprecated and is always %u\n",
1419 			RHT_ELASTICITY);
1420 
1421 	if (data[IFLA_BR_MCAST_HASH_MAX])
1422 		br->hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);
1423 
1424 	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
1425 		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);
1426 
1427 		br->multicast_ctx.multicast_last_member_count = val;
1428 	}
1429 
1430 	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
1431 		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);
1432 
1433 		br->multicast_ctx.multicast_startup_query_count = val;
1434 	}
1435 
1436 	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
1437 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);
1438 
1439 		br->multicast_ctx.multicast_last_member_interval = clock_t_to_jiffies(val);
1440 	}
1441 
1442 	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
1443 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);
1444 
1445 		br->multicast_ctx.multicast_membership_interval = clock_t_to_jiffies(val);
1446 	}
1447 
1448 	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
1449 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);
1450 
1451 		br->multicast_ctx.multicast_querier_interval = clock_t_to_jiffies(val);
1452 	}
1453 
1454 	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
1455 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
1456 
1457 		br_multicast_set_query_intvl(&br->multicast_ctx, val);
1458 	}
1459 
1460 	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
1461 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);
1462 
1463 		br->multicast_ctx.multicast_query_response_interval = clock_t_to_jiffies(val);
1464 	}
1465 
1466 	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
1467 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
1468 
1469 		br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
1470 	}
1471 
1472 	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
1473 		__u8 mcast_stats;
1474 
1475 		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
1476 		br_opt_toggle(br, BROPT_MULTICAST_STATS_ENABLED, !!mcast_stats);
1477 	}
1478 
1479 	if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
1480 		__u8 igmp_version;
1481 
1482 		igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
1483 		err = br_multicast_set_igmp_version(&br->multicast_ctx,
1484 						    igmp_version);
1485 		if (err)
1486 			return err;
1487 	}
1488 
1489 #if IS_ENABLED(CONFIG_IPV6)
1490 	if (data[IFLA_BR_MCAST_MLD_VERSION]) {
1491 		__u8 mld_version;
1492 
1493 		mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
1494 		err = br_multicast_set_mld_version(&br->multicast_ctx,
1495 						   mld_version);
1496 		if (err)
1497 			return err;
1498 	}
1499 #endif
1500 #endif
1501 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1502 	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
1503 		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);
1504 
1505 		br_opt_toggle(br, BROPT_NF_CALL_IPTABLES, !!val);
1506 	}
1507 
1508 	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
1509 		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);
1510 
1511 		br_opt_toggle(br, BROPT_NF_CALL_IP6TABLES, !!val);
1512 	}
1513 
1514 	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
1515 		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);
1516 
1517 		br_opt_toggle(br, BROPT_NF_CALL_ARPTABLES, !!val);
1518 	}
1519 #endif
1520 
1521 	if (data[IFLA_BR_MULTI_BOOLOPT]) {
1522 		struct br_boolopt_multi *bm;
1523 
1524 		bm = nla_data(data[IFLA_BR_MULTI_BOOLOPT]);
1525 		err = br_boolopt_multi_toggle(br, bm, extack);
1526 		if (err)
1527 			return err;
1528 	}
1529 
1530 	return 0;
1531 }
1532 
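/* Newlink handler for "ip link add ... type bridge": register the
 * netdevice, honour an explicit IFLA_ADDRESS, then apply any IFLA_BR_*
 * options; on failure the half-created bridge is deleted again.
 */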
1533 static int br_dev_newlink(struct net *src_net, struct net_device *dev,
1534 			  struct nlattr *tb[], struct nlattr *data[],
1535 			  struct netlink_ext_ack *extack)
1536 {
1537 	struct net_bridge *br = netdev_priv(dev);
1538 	int err;
1539 
1540 	err = register_netdevice(dev);
1541 	if (err)
1542 		return err;
1543 
1544 	if (tb[IFLA_ADDRESS]) {
1545 		spin_lock_bh(&br->lock);
1546 		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
1547 		spin_unlock_bh(&br->lock);
1548 	}
1549 
1550 	err = br_changelink(dev, tb, data, extack);
1551 	if (err)
1552 		br_dev_delete(dev, NULL);
1553 
1554 	return err;
1555 }
1556 
1557 static size_t br_get_size(const struct net_device *brdev)
1558 {
1559 	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY  */
1560 	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
1561 	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
1562 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_AGEING_TIME */
1563 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_STP_STATE */
1564 	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_PRIORITY */
1565 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_VLAN_FILTERING */
1566 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
1567 	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
1568 	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_VLAN_DEFAULT_PVID */
1569 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_VLAN_STATS_ENABLED */
1570 	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_PER_PORT */
1571 #endif
1572 	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_GROUP_FWD_MASK */
1573 	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_ROOT_ID */
1574 	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_BRIDGE_ID */
1575 	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_ROOT_PORT */
1576 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_ROOT_PATH_COST */
1577 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE */
1578 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
1579 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
1580 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
1581 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
1582 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
1583 	       nla_total_size(ETH_ALEN) +       /* IFLA_BR_GROUP_ADDR */
1584 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1585 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_ROUTER */
1586 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_SNOOPING */
1587 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_QUERY_USE_IFADDR */
1588 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_QUERIER */
1589 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_STATS_ENABLED */
1590 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_ELASTICITY */
1591 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_MAX */
1592 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
1593 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
1594 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
1595 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
1596 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
1597 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
1598 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
1599 	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
1600 	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_IGMP_VERSION */
1601 	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_MLD_VERSION */
1602 	       br_multicast_querier_state_size() + /* IFLA_BR_MCAST_QUERIER_STATE */
1603 #endif
1604 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1605 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_IPTABLES */
1606 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_IP6TABLES */
1607 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_ARPTABLES */
1608 #endif
1609 	       nla_total_size(sizeof(struct br_boolopt_multi)) + /* IFLA_BR_MULTI_BOOLOPT */
1610 	       0;
1611 }
1612 
1613 static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
1614 {
1615 	struct net_bridge *br = netdev_priv(brdev);
1616 	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
1617 	u32 hello_time = jiffies_to_clock_t(br->hello_time);
1618 	u32 age_time = jiffies_to_clock_t(br->max_age);
1619 	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
1620 	u32 stp_enabled = br->stp_enabled;
1621 	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
1622 	u8 vlan_enabled = br_vlan_enabled(br->dev);
1623 	struct br_boolopt_multi bm;
1624 	u64 clockval;
1625 
1626 	clockval = br_timer_value(&br->hello_timer);
1627 	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
1628 		return -EMSGSIZE;
1629 	clockval = br_timer_value(&br->tcn_timer);
1630 	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
1631 		return -EMSGSIZE;
1632 	clockval = br_timer_value(&br->topology_change_timer);
1633 	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
1634 			      IFLA_BR_PAD))
1635 		return -EMSGSIZE;
1636 	clockval = br_timer_value(&br->gc_work.timer);
1637 	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
1638 		return -EMSGSIZE;
1639 
1640 	br_boolopt_multi_get(br, &bm);
1641 	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
1642 	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
1643 	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
1644 	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
1645 	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
1646 	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
1647 	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
1648 	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
1649 	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
1650 		    &br->bridge_id) ||
1651 	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
1652 		    &br->designated_root) ||
1653 	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
1654 	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
1655 	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
1656 	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
1657 		       br->topology_change_detected) ||
1658 	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr) ||
1659 	    nla_put(skb, IFLA_BR_MULTI_BOOLOPT, sizeof(bm), &bm))
1660 		return -EMSGSIZE;
1661 
1662 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
1663 	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
1664 	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
1665 	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
1666 		       br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
1667 	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
1668 		       br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
1669 		return -EMSGSIZE;
1670 #endif
1671 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1672 	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER,
1673 		       br->multicast_ctx.multicast_router) ||
1674 	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING,
1675 		       br_opt_get(br, BROPT_MULTICAST_ENABLED)) ||
1676 	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
1677 		       br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR)) ||
1678 	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER,
1679 		       br->multicast_ctx.multicast_querier) ||
1680 	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
1681 		       br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) ||
1682 	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, RHT_ELASTICITY) ||
1683 	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
1684 	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
1685 			br->multicast_ctx.multicast_last_member_count) ||
1686 	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
1687 			br->multicast_ctx.multicast_startup_query_count) ||
1688 	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
1689 		       br->multicast_ctx.multicast_igmp_version) ||
1690 	    br_multicast_dump_querier_state(skb, &br->multicast_ctx,
1691 					    IFLA_BR_MCAST_QUERIER_STATE))
1692 		return -EMSGSIZE;
1693 #if IS_ENABLED(CONFIG_IPV6)
1694 	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
1695 		       br->multicast_ctx.multicast_mld_version))
1696 		return -EMSGSIZE;
1697 #endif
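	/* The multicast protocol intervals are kept in jiffies internally and
	 * exported to userspace in clock_t units.
	 */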
1698 	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_last_member_interval);
1699 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
1700 			      IFLA_BR_PAD))
1701 		return -EMSGSIZE;
1702 	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_membership_interval);
1703 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
1704 			      IFLA_BR_PAD))
1705 		return -EMSGSIZE;
1706 	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_querier_interval);
1707 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
1708 			      IFLA_BR_PAD))
1709 		return -EMSGSIZE;
1710 	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_query_interval);
1711 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
1712 			      IFLA_BR_PAD))
1713 		return -EMSGSIZE;
1714 	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_query_response_interval);
1715 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
1716 			      IFLA_BR_PAD))
1717 		return -EMSGSIZE;
1718 	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_startup_query_interval);
1719 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
1720 			      IFLA_BR_PAD))
1721 		return -EMSGSIZE;
1722 #endif
1723 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1724 	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
1725 		       br_opt_get(br, BROPT_NF_CALL_IPTABLES) ? 1 : 0) ||
1726 	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
1727 		       br_opt_get(br, BROPT_NF_CALL_IP6TABLES) ? 1 : 0) ||
1728 	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
1729 		       br_opt_get(br, BROPT_NF_CALL_ARPTABLES) ? 1 : 0))
1730 		return -EMSGSIZE;
1731 #endif
1732 
1733 	return 0;
1734 }
1735 
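/* Worst-case size estimate for a LINK_XSTATS_TYPE_BRIDGE nest: one
 * bridge_vlan_xstats entry per VLAN (placeholder entries included), the
 * multicast statistics blob, the per-port STP counters for slave requests
 * and the nest header itself.  Runs under rtnl (br_vlan_group() and
 * br_port_get_rtnl() both rely on it).
 */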
1736 static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
1737 {
1738 	struct net_bridge_port *p = NULL;
1739 	struct net_bridge_vlan_group *vg;
1740 	struct net_bridge_vlan *v;
1741 	struct net_bridge *br;
1742 	int numvls = 0;
1743 
1744 	switch (attr) {
1745 	case IFLA_STATS_LINK_XSTATS:
1746 		br = netdev_priv(dev);
1747 		vg = br_vlan_group(br);
1748 		break;
1749 	case IFLA_STATS_LINK_XSTATS_SLAVE:
1750 		p = br_port_get_rtnl(dev);
1751 		if (!p)
1752 			return 0;
1753 		vg = nbp_vlan_group(p);
1754 		break;
1755 	default:
1756 		return 0;
1757 	}
1758 
1759 	if (vg) {
1760 		/* we need to count all, even placeholder entries */
1761 		list_for_each_entry(v, &vg->vlan_list, vlist)
1762 			numvls++;
1763 	}
1764 
1765 	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
1766 	       nla_total_size_64bit(sizeof(struct br_mcast_stats)) +
1767 	       (p ? nla_total_size_64bit(sizeof(p->stp_xstats)) : 0) +
1768 	       nla_total_size(0);
1769 }
1770 
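/* Emit the LINK_XSTATS_TYPE_BRIDGE nest for RTM_GETSTATS (queried e.g. with
 * iproute2's "ip link xstats type bridge"): per-VLAN traffic counters,
 * multicast statistics and, for bridge ports, STP packet counters.
 * *prividx remembers where a previous, partially filled dump stopped so the
 * next pass can skip the attributes that were already sent.
 */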
1771 static int br_fill_linkxstats(struct sk_buff *skb,
1772 			      const struct net_device *dev,
1773 			      int *prividx, int attr)
1774 {
1775 	struct nlattr *nla __maybe_unused;
1776 	struct net_bridge_port *p = NULL;
1777 	struct net_bridge_vlan_group *vg;
1778 	struct net_bridge_vlan *v;
1779 	struct net_bridge *br;
1780 	struct nlattr *nest;
1781 	int vl_idx = 0;
1782 
1783 	switch (attr) {
1784 	case IFLA_STATS_LINK_XSTATS:
1785 		br = netdev_priv(dev);
1786 		vg = br_vlan_group(br);
1787 		break;
1788 	case IFLA_STATS_LINK_XSTATS_SLAVE:
1789 		p = br_port_get_rtnl(dev);
1790 		if (!p)
1791 			return 0;
1792 		br = p->br;
1793 		vg = nbp_vlan_group(p);
1794 		break;
1795 	default:
1796 		return -EINVAL;
1797 	}
1798 
1799 	nest = nla_nest_start_noflag(skb, LINK_XSTATS_TYPE_BRIDGE);
1800 	if (!nest)
1801 		return -EMSGSIZE;
1802 
1803 	if (vg) {
1804 		u16 pvid;
1805 
1806 		pvid = br_get_pvid(vg);
1807 		list_for_each_entry(v, &vg->vlan_list, vlist) {
1808 			struct bridge_vlan_xstats vxi;
1809 			struct pcpu_sw_netstats stats;
1810 
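			/* skip entries already emitted by a previous partial dump */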
1811 			if (++vl_idx < *prividx)
1812 				continue;
1813 			memset(&vxi, 0, sizeof(vxi));
1814 			vxi.vid = v->vid;
1815 			vxi.flags = v->flags;
1816 			if (v->vid == pvid)
1817 				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
1818 			br_vlan_get_stats(v, &stats);
1819 			vxi.rx_bytes = u64_stats_read(&stats.rx_bytes);
1820 			vxi.rx_packets = u64_stats_read(&stats.rx_packets);
1821 			vxi.tx_bytes = u64_stats_read(&stats.tx_bytes);
1822 			vxi.tx_packets = u64_stats_read(&stats.tx_packets);
1823 
1824 			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
1825 				goto nla_put_failure;
1826 		}
1827 	}
1828 
1829 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
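	/* The multicast blob occupies one more xstats slot; emit it only if a
	 * previous partial dump did not already get this far.
	 */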
1830 	if (++vl_idx >= *prividx) {
1831 		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
1832 					sizeof(struct br_mcast_stats),
1833 					BRIDGE_XSTATS_PAD);
1834 		if (!nla)
1835 			goto nla_put_failure;
1836 		br_multicast_get_stats(br, p, nla_data(nla));
1837 	}
1838 #endif
1839 
1840 	if (p) {
1841 		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_STP,
1842 					sizeof(p->stp_xstats),
1843 					BRIDGE_XSTATS_PAD);
1844 		if (!nla)
1845 			goto nla_put_failure;
1846 
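		/* copy the STP counters under br->lock for a consistent snapshot */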
1847 		spin_lock_bh(&br->lock);
1848 		memcpy(nla_data(nla), &p->stp_xstats, sizeof(p->stp_xstats));
1849 		spin_unlock_bh(&br->lock);
1850 	}
1851 
1852 	nla_nest_end(skb, nest);
1853 	*prividx = 0;
1854 
1855 	return 0;
1856 
1857 nla_put_failure:
1858 	nla_nest_end(skb, nest);
1859 	*prividx = vl_idx;
1860 
1861 	return -EMSGSIZE;
1862 }
1863 
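/* AF_BRIDGE address-family ops: lets the rtnetlink core size the
 * IFLA_AF_SPEC payload (VLAN, MST and CFM info) for bridges and their
 * ports when building RTM_NEWLINK messages.
 */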
1864 static struct rtnl_af_ops br_af_ops __read_mostly = {
1865 	.family			= AF_BRIDGE,
1866 	.get_link_af_size	= br_get_link_af_size_filtered,
1867 };
1868 
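/* rtnl_link_ops for the "bridge" link kind: creation and configuration of
 * bridge devices (e.g. "ip link add br0 type bridge") plus the per-port
 * (slave) IFLA_BRPORT_* attribute handling.
 */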
1869 struct rtnl_link_ops br_link_ops __read_mostly = {
1870 	.kind			= "bridge",
1871 	.priv_size		= sizeof(struct net_bridge),
1872 	.setup			= br_dev_setup,
1873 	.maxtype		= IFLA_BR_MAX,
1874 	.policy			= br_policy,
1875 	.validate		= br_validate,
1876 	.newlink		= br_dev_newlink,
1877 	.changelink		= br_changelink,
1878 	.dellink		= br_dev_delete,
1879 	.get_size		= br_get_size,
1880 	.fill_info		= br_fill_info,
1881 	.fill_linkxstats	= br_fill_linkxstats,
1882 	.get_linkxstats_size	= br_get_linkxstats_size,
1883 
1884 	.slave_maxtype		= IFLA_BRPORT_MAX,
1885 	.slave_policy		= br_port_policy,
1886 	.slave_changelink	= br_port_slave_changelink,
1887 	.get_slave_size		= br_port_get_slave_size,
1888 	.fill_slave_info	= br_port_fill_slave_info,
1889 };
1890 
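/* Register the bridge's rtnetlink hooks: the RTM_*VLAN handlers, the
 * AF_BRIDGE af_ops and the "bridge" link ops.  Only rtnl_link_register()
 * has its return value checked; on failure the af_ops registration is
 * rolled back before propagating the error.
 */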
1891 int __init br_netlink_init(void)
1892 {
1893 	int err;
1894 
1895 	br_vlan_rtnl_init();
1896 	rtnl_af_register(&br_af_ops);
1897 
1898 	err = rtnl_link_register(&br_link_ops);
1899 	if (err)
1900 		goto out_af;
1901 
1902 	return 0;
1903 
1904 out_af:
1905 	rtnl_af_unregister(&br_af_ops);
1906 	return err;
1907 }
1908 
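/* Tear down everything br_netlink_init() registered. */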
1909 void br_netlink_fini(void)
1910 {
1911 	br_vlan_rtnl_uninit();
1912 	rtnl_af_unregister(&br_af_ops);
1913 	rtnl_link_unregister(&br_link_ops);
1914 }
1915