1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Bridge netlink control interface
4 *
5 * Authors:
6 * Stephen Hemminger <shemminger@osdl.org>
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
11 #include <linux/etherdevice.h>
12 #include <net/rtnetlink.h>
13 #include <net/net_namespace.h>
14 #include <net/sock.h>
15 #include <uapi/linux/if_bridge.h>
16
17 #include "br_private.h"
18 #include "br_private_stp.h"
19 #include "br_private_cfm.h"
20 #include "br_private_tunnel.h"
21 #include "br_private_mcast_eht.h"
22
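/*
 * Count how many bridge_vlan_info entries a compressed vlan dump needs.
 * Consecutive vids with identical flags are folded into one range, which
 * is emitted as two entries (RANGE_BEGIN + RANGE_END); an isolated vid
 * costs a single entry.  Illustrative example: vids 10-14 untagged plus
 * vid 20 tagged would need 2 + 1 = 3 entries.
 */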
23 static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
24 u32 filter_mask)
25 {
26 struct net_bridge_vlan *v;
27 u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
28 u16 flags, pvid;
29 int num_vlans = 0;
30
31 if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
32 return 0;
33
34 pvid = br_get_pvid(vg);
35 /* Count number of vlan infos */
36 list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
37 flags = 0;
38 /* only a context, bridge vlan not activated */
39 if (!br_vlan_should_use(v))
40 continue;
41 if (v->vid == pvid)
42 flags |= BRIDGE_VLAN_INFO_PVID;
43
44 if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
45 flags |= BRIDGE_VLAN_INFO_UNTAGGED;
46
47 if (vid_range_start == 0) {
48 goto initvars;
49 } else if ((v->vid - vid_range_end) == 1 &&
50 flags == vid_range_flags) {
51 vid_range_end = v->vid;
52 continue;
53 } else {
54 if ((vid_range_end - vid_range_start) > 0)
55 num_vlans += 2;
56 else
57 num_vlans += 1;
58 }
59 initvars:
60 vid_range_start = v->vid;
61 vid_range_end = v->vid;
62 vid_range_flags = flags;
63 }
64
65 if (vid_range_start != 0) {
66 if ((vid_range_end - vid_range_start) > 0)
67 num_vlans += 2;
68 else
69 num_vlans += 1;
70 }
71
72 return num_vlans;
73 }
74
75 static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
76 u32 filter_mask)
77 {
78 int num_vlans;
79
80 if (!vg)
81 return 0;
82
83 if (filter_mask & RTEXT_FILTER_BRVLAN)
84 return vg->num_vlans;
85
86 rcu_read_lock();
87 num_vlans = __get_num_vlan_infos(vg, filter_mask);
88 rcu_read_unlock();
89
90 return num_vlans;
91 }
92
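/*
 * Estimate the IFLA_AF_SPEC payload for @dev: vlan infos (full or
 * compressed, depending on @filter_mask), optional per-vlan tunnel info,
 * MST state and, for bridge masters, the CFM status nest.  Used by
 * br_nlmsg_size() to size notification skbs.
 */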
93 static size_t br_get_link_af_size_filtered(const struct net_device *dev,
94 u32 filter_mask)
95 {
96 struct net_bridge_vlan_group *vg = NULL;
97 struct net_bridge_port *p = NULL;
98 struct net_bridge *br = NULL;
99 u32 num_cfm_peer_mep_infos;
100 u32 num_cfm_mep_infos;
101 size_t vinfo_sz = 0;
102 int num_vlan_infos;
103
104 rcu_read_lock();
105 if (netif_is_bridge_port(dev)) {
106 p = br_port_get_check_rcu(dev);
107 if (p)
108 vg = nbp_vlan_group_rcu(p);
109 } else if (netif_is_bridge_master(dev)) {
110 br = netdev_priv(dev);
111 vg = br_vlan_group_rcu(br);
112 }
113 num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
114 rcu_read_unlock();
115
116 if (p && (p->flags & BR_VLAN_TUNNEL))
117 vinfo_sz += br_get_vlan_tunnel_info_size(vg);
118
119 /* Each VLAN is returned in bridge_vlan_info along with flags */
120 vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
121
122 if (p && vg && (filter_mask & RTEXT_FILTER_MST))
123 vinfo_sz += br_mst_info_size(vg);
124
125 if (!(filter_mask & RTEXT_FILTER_CFM_STATUS))
126 return vinfo_sz;
127
128 if (!br)
129 return vinfo_sz;
130
131 /* CFM status info must be added */
132 br_cfm_mep_count(br, &num_cfm_mep_infos);
133 br_cfm_peer_mep_count(br, &num_cfm_peer_mep_infos);
134
135 vinfo_sz += nla_total_size(0); /* IFLA_BRIDGE_CFM */
136 /* For each status struct the MEP instance (u32) is added */
137 /* MEP instance (u32) + br_cfm_mep_status */
138 vinfo_sz += num_cfm_mep_infos *
139 /* IFLA_BRIDGE_CFM_MEP_STATUS_INSTANCE */
140 (nla_total_size(sizeof(u32))
141 /* IFLA_BRIDGE_CFM_MEP_STATUS_OPCODE_UNEXP_SEEN */
142 + nla_total_size(sizeof(u32))
143 /* IFLA_BRIDGE_CFM_MEP_STATUS_VERSION_UNEXP_SEEN */
144 + nla_total_size(sizeof(u32))
145 /* IFLA_BRIDGE_CFM_MEP_STATUS_RX_LEVEL_LOW_SEEN */
146 + nla_total_size(sizeof(u32)));
147 /* MEP instance (u32) + br_cfm_cc_peer_status */
148 vinfo_sz += num_cfm_peer_mep_infos *
149 /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_INSTANCE */
150 (nla_total_size(sizeof(u32))
151 /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_PEER_MEPID */
152 + nla_total_size(sizeof(u32))
153 /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_CCM_DEFECT */
154 + nla_total_size(sizeof(u32))
155 /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_RDI */
156 + nla_total_size(sizeof(u32))
157 /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_PORT_TLV_VALUE */
158 + nla_total_size(sizeof(u8))
159 /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_IF_TLV_VALUE */
160 + nla_total_size(sizeof(u8))
161 /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEEN */
162 + nla_total_size(sizeof(u32))
163 /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_TLV_SEEN */
164 + nla_total_size(sizeof(u32))
165 /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEQ_UNEXP_SEEN */
166 + nla_total_size(sizeof(u32)));
167
168 return vinfo_sz;
169 }
170
171 static inline size_t br_port_info_size(void)
172 {
173 return nla_total_size(1) /* IFLA_BRPORT_STATE */
174 + nla_total_size(2) /* IFLA_BRPORT_PRIORITY */
175 + nla_total_size(4) /* IFLA_BRPORT_COST */
176 + nla_total_size(1) /* IFLA_BRPORT_MODE */
177 + nla_total_size(1) /* IFLA_BRPORT_GUARD */
178 + nla_total_size(1) /* IFLA_BRPORT_PROTECT */
179 + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
180 + nla_total_size(1) /* IFLA_BRPORT_MCAST_TO_UCAST */
181 + nla_total_size(1) /* IFLA_BRPORT_LEARNING */
182 + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
183 + nla_total_size(1) /* IFLA_BRPORT_MCAST_FLOOD */
184 + nla_total_size(1) /* IFLA_BRPORT_BCAST_FLOOD */
185 + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
186 + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
187 + nla_total_size(1) /* IFLA_BRPORT_VLAN_TUNNEL */
188 + nla_total_size(1) /* IFLA_BRPORT_NEIGH_SUPPRESS */
189 + nla_total_size(1) /* IFLA_BRPORT_ISOLATED */
190 + nla_total_size(1) /* IFLA_BRPORT_LOCKED */
191 + nla_total_size(1) /* IFLA_BRPORT_MAB */
192 + nla_total_size(1) /* IFLA_BRPORT_NEIGH_VLAN_SUPPRESS */
193 + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_ROOT_ID */
194 + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_BRIDGE_ID */
195 + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_PORT */
196 + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_COST */
197 + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_ID */
198 + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_NO */
199 + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
200 + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_CONFIG_PENDING */
201 + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
202 + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
203 + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
204 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
205 + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MULTICAST_ROUTER */
206 + nla_total_size(sizeof(u32)) /* IFLA_BRPORT_MCAST_N_GROUPS */
207 + nla_total_size(sizeof(u32)) /* IFLA_BRPORT_MCAST_MAX_GROUPS */
208 #endif
209 + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_GROUP_FWD_MASK */
210 + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MRP_RING_OPEN */
211 + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MRP_IN_OPEN */
212 + nla_total_size(sizeof(u32)) /* IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT */
213 + nla_total_size(sizeof(u32)) /* IFLA_BRPORT_MCAST_EHT_HOSTS_CNT */
214 + nla_total_size(sizeof(u32)) /* IFLA_BRPORT_BACKUP_NHID */
215 + 0;
216 }
217
218 static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
219 {
220 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
221 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
222 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
223 + nla_total_size(4) /* IFLA_MASTER */
224 + nla_total_size(4) /* IFLA_MTU */
225 + nla_total_size(4) /* IFLA_LINK */
226 + nla_total_size(1) /* IFLA_OPERSTATE */
227 + nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
228 + nla_total_size(br_get_link_af_size_filtered(dev,
229 filter_mask)) /* IFLA_AF_SPEC */
230 + nla_total_size(4); /* IFLA_BRPORT_BACKUP_PORT */
231 }
232
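/*
 * Fill the IFLA_BRPORT_* attributes for one port inside the caller's
 * IFLA_PROTINFO nest.  Returns -EMSGSIZE if the skb runs out of room so
 * the caller can cancel the message.
 */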
233 static int br_port_fill_attrs(struct sk_buff *skb,
234 const struct net_bridge_port *p)
235 {
236 u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
237 struct net_bridge_port *backup_p;
238 u64 timerval;
239
240 if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
241 nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
242 nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
243 nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
244 nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
245 nla_put_u8(skb, IFLA_BRPORT_PROTECT,
246 !!(p->flags & BR_ROOT_BLOCK)) ||
247 nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
248 !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
249 nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
250 !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
251 nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
252 nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
253 !!(p->flags & BR_FLOOD)) ||
254 nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
255 !!(p->flags & BR_MCAST_FLOOD)) ||
256 nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
257 !!(p->flags & BR_BCAST_FLOOD)) ||
258 nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
259 nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
260 !!(p->flags & BR_PROXYARP_WIFI)) ||
261 nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
262 &p->designated_root) ||
263 nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
264 &p->designated_bridge) ||
265 nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
266 nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
267 nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
268 nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
269 nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
270 p->topology_change_ack) ||
271 nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
272 nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
273 BR_VLAN_TUNNEL)) ||
274 nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
275 nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
276 !!(p->flags & BR_NEIGH_SUPPRESS)) ||
277 nla_put_u8(skb, IFLA_BRPORT_MRP_RING_OPEN, !!(p->flags &
278 BR_MRP_LOST_CONT)) ||
279 nla_put_u8(skb, IFLA_BRPORT_MRP_IN_OPEN,
280 !!(p->flags & BR_MRP_LOST_IN_CONT)) ||
281 nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)) ||
282 nla_put_u8(skb, IFLA_BRPORT_LOCKED, !!(p->flags & BR_PORT_LOCKED)) ||
283 nla_put_u8(skb, IFLA_BRPORT_MAB, !!(p->flags & BR_PORT_MAB)) ||
284 nla_put_u8(skb, IFLA_BRPORT_NEIGH_VLAN_SUPPRESS,
285 !!(p->flags & BR_NEIGH_VLAN_SUPPRESS)))
286 return -EMSGSIZE;
287
288 timerval = br_timer_value(&p->message_age_timer);
289 if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
290 IFLA_BRPORT_PAD))
291 return -EMSGSIZE;
292 timerval = br_timer_value(&p->forward_delay_timer);
293 if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
294 IFLA_BRPORT_PAD))
295 return -EMSGSIZE;
296 timerval = br_timer_value(&p->hold_timer);
297 if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
298 IFLA_BRPORT_PAD))
299 return -EMSGSIZE;
300
301 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
302 if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
303 p->multicast_ctx.multicast_router) ||
304 nla_put_u32(skb, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
305 p->multicast_eht_hosts_limit) ||
306 nla_put_u32(skb, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
307 p->multicast_eht_hosts_cnt) ||
308 nla_put_u32(skb, IFLA_BRPORT_MCAST_N_GROUPS,
309 br_multicast_ngroups_get(&p->multicast_ctx)) ||
310 nla_put_u32(skb, IFLA_BRPORT_MCAST_MAX_GROUPS,
311 br_multicast_ngroups_get_max(&p->multicast_ctx)))
312 return -EMSGSIZE;
313 #endif
314
315 /* we might be called with only br->lock held (not RTNL), hence RCU */
316 rcu_read_lock();
317 backup_p = rcu_dereference(p->backup_port);
318 if (backup_p)
319 nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT,
320 backup_p->dev->ifindex);
321 rcu_read_unlock();
322
323 if (p->backup_nhid &&
324 nla_put_u32(skb, IFLA_BRPORT_BACKUP_NHID, p->backup_nhid))
325 return -EMSGSIZE;
326
327 return 0;
328 }
329
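/*
 * Emit one vlan or one vlan range.  A range is encoded as two
 * bridge_vlan_info entries sharing the same flags: the first carries
 * BRIDGE_VLAN_INFO_RANGE_BEGIN with the start vid, the second
 * BRIDGE_VLAN_INFO_RANGE_END with the end vid.
 */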
330 static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
331 u16 vid_end, u16 flags)
332 {
333 struct bridge_vlan_info vinfo;
334
335 if ((vid_end - vid_start) > 0) {
336 /* add range to skb */
337 vinfo.vid = vid_start;
338 vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
339 if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
340 sizeof(vinfo), &vinfo))
341 goto nla_put_failure;
342
343 vinfo.vid = vid_end;
344 vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
345 if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
346 sizeof(vinfo), &vinfo))
347 goto nla_put_failure;
348 } else {
349 vinfo.vid = vid_start;
350 vinfo.flags = flags;
351 if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
352 sizeof(vinfo), &vinfo))
353 goto nla_put_failure;
354 }
355
356 return 0;
357
358 nla_put_failure:
359 return -EMSGSIZE;
360 }
361
362 static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
363 struct net_bridge_vlan_group *vg)
364 {
365 struct net_bridge_vlan *v;
366 u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
367 u16 flags, pvid;
368 int err = 0;
369
370 /* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
371 * and mark vlan info with begin and end flags
372 * if vlaninfo represents a range
373 */
374 pvid = br_get_pvid(vg);
375 list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
376 flags = 0;
377 if (!br_vlan_should_use(v))
378 continue;
379 if (v->vid == pvid)
380 flags |= BRIDGE_VLAN_INFO_PVID;
381
382 if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
383 flags |= BRIDGE_VLAN_INFO_UNTAGGED;
384
385 if (vid_range_start == 0) {
386 goto initvars;
387 } else if ((v->vid - vid_range_end) == 1 &&
388 flags == vid_range_flags) {
389 vid_range_end = v->vid;
390 continue;
391 } else {
392 err = br_fill_ifvlaninfo_range(skb, vid_range_start,
393 vid_range_end,
394 vid_range_flags);
395 if (err)
396 return err;
397 }
398
399 initvars:
400 vid_range_start = v->vid;
401 vid_range_end = v->vid;
402 vid_range_flags = flags;
403 }
404
405 if (vid_range_start != 0) {
406 /* Call it once more to send any left over vlans */
407 err = br_fill_ifvlaninfo_range(skb, vid_range_start,
408 vid_range_end,
409 vid_range_flags);
410 if (err)
411 return err;
412 }
413
414 return 0;
415 }
416
417 static int br_fill_ifvlaninfo(struct sk_buff *skb,
418 struct net_bridge_vlan_group *vg)
419 {
420 struct bridge_vlan_info vinfo;
421 struct net_bridge_vlan *v;
422 u16 pvid;
423
424 pvid = br_get_pvid(vg);
425 list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
426 if (!br_vlan_should_use(v))
427 continue;
428
429 vinfo.vid = v->vid;
430 vinfo.flags = 0;
431 if (v->vid == pvid)
432 vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
433
434 if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
435 vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
436
437 if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
438 sizeof(vinfo), &vinfo))
439 goto nla_put_failure;
440 }
441
442 return 0;
443
444 nla_put_failure:
445 return -EMSGSIZE;
446 }
447
448 /*
449 * Create one netlink message for one interface
450 * Contains port and master info as well as carrier and bridge state.
451 */
452 static int br_fill_ifinfo(struct sk_buff *skb,
453 const struct net_bridge_port *port,
454 u32 pid, u32 seq, int event, unsigned int flags,
455 u32 filter_mask, const struct net_device *dev,
456 bool getlink)
457 {
458 u8 operstate = netif_running(dev) ? READ_ONCE(dev->operstate) :
459 IF_OPER_DOWN;
460 struct nlattr *af = NULL;
461 struct net_bridge *br;
462 struct ifinfomsg *hdr;
463 struct nlmsghdr *nlh;
464
465 if (port)
466 br = port->br;
467 else
468 br = netdev_priv(dev);
469
470 br_debug(br, "br_fill_info event %d port %s master %s\n",
471 event, dev->name, br->dev->name);
472
473 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
474 if (nlh == NULL)
475 return -EMSGSIZE;
476
477 hdr = nlmsg_data(nlh);
478 hdr->ifi_family = AF_BRIDGE;
479 hdr->__ifi_pad = 0;
480 hdr->ifi_type = dev->type;
481 hdr->ifi_index = dev->ifindex;
482 hdr->ifi_flags = dev_get_flags(dev);
483 hdr->ifi_change = 0;
484
485 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
486 nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
487 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
488 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
489 (dev->addr_len &&
490 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
491 (dev->ifindex != dev_get_iflink(dev) &&
492 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
493 goto nla_put_failure;
494
495 if (event == RTM_NEWLINK && port) {
496 struct nlattr *nest;
497
498 nest = nla_nest_start(skb, IFLA_PROTINFO);
499 if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
500 goto nla_put_failure;
501 nla_nest_end(skb, nest);
502 }
503
504 if (filter_mask & (RTEXT_FILTER_BRVLAN |
505 RTEXT_FILTER_BRVLAN_COMPRESSED |
506 RTEXT_FILTER_MRP |
507 RTEXT_FILTER_CFM_CONFIG |
508 RTEXT_FILTER_CFM_STATUS |
509 RTEXT_FILTER_MST)) {
510 af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
511 if (!af)
512 goto nla_put_failure;
513 }
514
515 /* Check if the VID information is requested */
516 if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
517 (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
518 struct net_bridge_vlan_group *vg;
519 int err;
520
521 /* RCU needed because of the VLAN locking rules (rcu || rtnl) */
522 rcu_read_lock();
523 if (port)
524 vg = nbp_vlan_group_rcu(port);
525 else
526 vg = br_vlan_group_rcu(br);
527
528 if (!vg || !vg->num_vlans) {
529 rcu_read_unlock();
530 goto done;
531 }
532 if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
533 err = br_fill_ifvlaninfo_compressed(skb, vg);
534 else
535 err = br_fill_ifvlaninfo(skb, vg);
536
537 if (port && (port->flags & BR_VLAN_TUNNEL))
538 err = br_fill_vlan_tunnel_info(skb, vg);
539 rcu_read_unlock();
540 if (err)
541 goto nla_put_failure;
542 }
543
544 if (filter_mask & RTEXT_FILTER_MRP) {
545 int err;
546
547 if (!br_mrp_enabled(br) || port)
548 goto done;
549
550 rcu_read_lock();
551 err = br_mrp_fill_info(skb, br);
552 rcu_read_unlock();
553
554 if (err)
555 goto nla_put_failure;
556 }
557
558 if (filter_mask & (RTEXT_FILTER_CFM_CONFIG | RTEXT_FILTER_CFM_STATUS)) {
559 struct nlattr *cfm_nest = NULL;
560 int err;
561
562 if (!br_cfm_created(br) || port)
563 goto done;
564
565 cfm_nest = nla_nest_start(skb, IFLA_BRIDGE_CFM);
566 if (!cfm_nest)
567 goto nla_put_failure;
568
569 if (filter_mask & RTEXT_FILTER_CFM_CONFIG) {
570 rcu_read_lock();
571 err = br_cfm_config_fill_info(skb, br);
572 rcu_read_unlock();
573 if (err)
574 goto nla_put_failure;
575 }
576
577 if (filter_mask & RTEXT_FILTER_CFM_STATUS) {
578 rcu_read_lock();
579 err = br_cfm_status_fill_info(skb, br, getlink);
580 rcu_read_unlock();
581 if (err)
582 goto nla_put_failure;
583 }
584
585 nla_nest_end(skb, cfm_nest);
586 }
587
588 if ((filter_mask & RTEXT_FILTER_MST) &&
589 br_opt_get(br, BROPT_MST_ENABLED) && port) {
590 const struct net_bridge_vlan_group *vg = nbp_vlan_group(port);
591 struct nlattr *mst_nest;
592 int err;
593
594 if (!vg || !vg->num_vlans)
595 goto done;
596
597 mst_nest = nla_nest_start(skb, IFLA_BRIDGE_MST);
598 if (!mst_nest)
599 goto nla_put_failure;
600
601 err = br_mst_fill_info(skb, vg);
602 if (err)
603 goto nla_put_failure;
604
605 nla_nest_end(skb, mst_nest);
606 }
607
608 done:
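	/* Close the IFLA_AF_SPEC nest only if at least one attribute was
	 * added to it; otherwise cancel it so we don't emit an empty nest.
	 */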
609 if (af) {
610 if (nlmsg_get_pos(skb) - (void *)af > nla_attr_size(0))
611 nla_nest_end(skb, af);
612 else
613 nla_nest_cancel(skb, af);
614 }
615
616 nlmsg_end(skb, nlh);
617 return 0;
618
619 nla_put_failure:
620 nlmsg_cancel(skb, nlh);
621 return -EMSGSIZE;
622 }
623
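/*
 * Build and send an rtnetlink notification (RTNLGRP_LINK) for a bridge
 * or one of its ports.  Allocation is GFP_ATOMIC since callers may hold
 * the bridge lock or run from timer context.
 */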
624 void br_info_notify(int event, const struct net_bridge *br,
625 const struct net_bridge_port *port, u32 filter)
626 {
627 struct net_device *dev;
628 struct sk_buff *skb;
629 int err = -ENOBUFS;
630 struct net *net;
631 u16 port_no = 0;
632
633 if (WARN_ON(!port && !br))
634 return;
635
636 if (port) {
637 dev = port->dev;
638 br = port->br;
639 port_no = port->port_no;
640 } else {
641 dev = br->dev;
642 }
643
644 net = dev_net(dev);
645 br_debug(br, "port %u(%s) event %d\n", port_no, dev->name, event);
646
647 skb = nlmsg_new(br_nlmsg_size(dev, filter), GFP_ATOMIC);
648 if (skb == NULL)
649 goto errout;
650
651 err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev, false);
652 if (err < 0) {
653 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
654 WARN_ON(err == -EMSGSIZE);
655 kfree_skb(skb);
656 goto errout;
657 }
658 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
659 return;
660 errout:
661 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
662 }
663
664 /* Notify listeners of a change in bridge or port information */
665 void br_ifinfo_notify(int event, const struct net_bridge *br,
666 const struct net_bridge_port *port)
667 {
668 u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
669
670 br_info_notify(event, br, port, filter);
671 }
672
673 /*
674 * Dump information about all ports, in response to GETLINK
675 */
676 int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
677 struct net_device *dev, u32 filter_mask, int nlflags)
678 {
679 struct net_bridge_port *port = br_port_get_rtnl(dev);
680
681 if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
682 !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) &&
683 !(filter_mask & RTEXT_FILTER_MRP) &&
684 !(filter_mask & RTEXT_FILTER_CFM_CONFIG) &&
685 !(filter_mask & RTEXT_FILTER_CFM_STATUS))
686 return 0;
687
688 return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
689 filter_mask, dev, true);
690 }
691
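/*
 * Apply a single vlan add (RTM_SETLINK) or delete (RTM_DELLINK) to a
 * port or, when @p is NULL, to the bridge itself.  *changed is set when
 * the operation actually modified the vlan configuration.
 */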
692 static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
693 int cmd, struct bridge_vlan_info *vinfo, bool *changed,
694 struct netlink_ext_ack *extack)
695 {
696 bool curr_change;
697 int err = 0;
698
699 switch (cmd) {
700 case RTM_SETLINK:
701 if (p) {
702 /* if the MASTER flag is set this will act on the global
703 * per-VLAN entry as well
704 */
705 err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
706 &curr_change, extack);
707 } else {
708 vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
709 err = br_vlan_add(br, vinfo->vid, vinfo->flags,
710 &curr_change, extack);
711 }
712 if (curr_change)
713 *changed = true;
714 break;
715
716 case RTM_DELLINK:
717 if (p) {
718 if (!nbp_vlan_delete(p, vinfo->vid))
719 *changed = true;
720
721 if ((vinfo->flags & BRIDGE_VLAN_INFO_MASTER) &&
722 !br_vlan_delete(p->br, vinfo->vid))
723 *changed = true;
724 } else if (!br_vlan_delete(br, vinfo->vid)) {
725 *changed = true;
726 }
727 break;
728 }
729
730 return err;
731 }
732
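/*
 * Handle one IFLA_BRIDGE_VLAN_INFO entry.  A RANGE_BEGIN entry is only
 * remembered in *vinfo_last; the matching RANGE_END entry expands the
 * range vid by vid, batching NEWVLAN/DELVLAN notifications over the
 * contiguous stretches that actually changed.
 */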
733 int br_process_vlan_info(struct net_bridge *br,
734 struct net_bridge_port *p, int cmd,
735 struct bridge_vlan_info *vinfo_curr,
736 struct bridge_vlan_info **vinfo_last,
737 bool *changed,
738 struct netlink_ext_ack *extack)
739 {
740 int err, rtm_cmd;
741
742 if (!br_vlan_valid_id(vinfo_curr->vid, extack))
743 return -EINVAL;
744
745 /* needed for vlan-only NEWVLAN/DELVLAN notifications */
746 rtm_cmd = br_afspec_cmd_to_rtm(cmd);
747
748 if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
749 if (!br_vlan_valid_range(vinfo_curr, *vinfo_last, extack))
750 return -EINVAL;
751 *vinfo_last = vinfo_curr;
752 return 0;
753 }
754
755 if (*vinfo_last) {
756 struct bridge_vlan_info tmp_vinfo;
757 int v, v_change_start = 0;
758
759 if (!br_vlan_valid_range(vinfo_curr, *vinfo_last, extack))
760 return -EINVAL;
761
762 memcpy(&tmp_vinfo, *vinfo_last,
763 sizeof(struct bridge_vlan_info));
764 for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
765 bool curr_change = false;
766
767 tmp_vinfo.vid = v;
768 err = br_vlan_info(br, p, cmd, &tmp_vinfo, &curr_change,
769 extack);
770 if (err)
771 break;
772 if (curr_change) {
773 *changed = curr_change;
774 if (!v_change_start)
775 v_change_start = v;
776 } else {
777 /* nothing to notify yet */
778 if (!v_change_start)
779 continue;
780 br_vlan_notify(br, p, v_change_start,
781 v - 1, rtm_cmd);
782 v_change_start = 0;
783 }
784 cond_resched();
785 }
786 /* v_change_start is set only if the last/whole range changed */
787 if (v_change_start)
788 br_vlan_notify(br, p, v_change_start,
789 v - 1, rtm_cmd);
790
791 *vinfo_last = NULL;
792
793 return err;
794 }
795
796 err = br_vlan_info(br, p, cmd, vinfo_curr, changed, extack);
797 if (*changed)
798 br_vlan_notify(br, p, vinfo_curr->vid, 0, rtm_cmd);
799
800 return err;
801 }
802
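/*
 * Walk the IFLA_AF_SPEC nest of an RTM_SETLINK/RTM_DELLINK request and
 * dispatch the vlan, vlan-tunnel, MRP, CFM and MST sub-attributes.  This
 * is the path taken by e.g. "bridge vlan add dev eth0 vid 10 pvid
 * untagged", which iproute2 encodes as an IFLA_BRIDGE_VLAN_INFO attribute.
 */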
803 static int br_afspec(struct net_bridge *br,
804 struct net_bridge_port *p,
805 struct nlattr *af_spec,
806 int cmd, bool *changed,
807 struct netlink_ext_ack *extack)
808 {
809 struct bridge_vlan_info *vinfo_curr = NULL;
810 struct bridge_vlan_info *vinfo_last = NULL;
811 struct nlattr *attr;
812 struct vtunnel_info tinfo_last = {};
813 struct vtunnel_info tinfo_curr = {};
814 int err = 0, rem;
815
816 nla_for_each_nested(attr, af_spec, rem) {
817 err = 0;
818 switch (nla_type(attr)) {
819 case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
820 if (!p || !(p->flags & BR_VLAN_TUNNEL))
821 return -EINVAL;
822 err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
823 if (err)
824 return err;
825 err = br_process_vlan_tunnel_info(br, p, cmd,
826 &tinfo_curr,
827 &tinfo_last,
828 changed);
829 if (err)
830 return err;
831 break;
832 case IFLA_BRIDGE_VLAN_INFO:
833 if (nla_len(attr) != sizeof(struct bridge_vlan_info))
834 return -EINVAL;
835 vinfo_curr = nla_data(attr);
836 err = br_process_vlan_info(br, p, cmd, vinfo_curr,
837 &vinfo_last, changed,
838 extack);
839 if (err)
840 return err;
841 break;
842 case IFLA_BRIDGE_MRP:
843 err = br_mrp_parse(br, p, attr, cmd, extack);
844 if (err)
845 return err;
846 break;
847 case IFLA_BRIDGE_CFM:
848 err = br_cfm_parse(br, p, attr, cmd, extack);
849 if (err)
850 return err;
851 break;
852 case IFLA_BRIDGE_MST:
853 if (!p) {
854 NL_SET_ERR_MSG(extack,
855 "MST states can only be set on bridge ports");
856 return -EINVAL;
857 }
858
859 if (cmd != RTM_SETLINK) {
860 NL_SET_ERR_MSG(extack,
861 "MST states can only be set through RTM_SETLINK");
862 return -EINVAL;
863 }
864
865 err = br_mst_process(p, attr, extack);
866 if (err)
867 return err;
868 break;
869 }
870 }
871
872 return err;
873 }
874
875 static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
876 [IFLA_BRPORT_UNSPEC] = { .strict_start_type =
877 IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT + 1 },
878 [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
879 [IFLA_BRPORT_COST] = { .type = NLA_U32 },
880 [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
881 [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
882 [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
883 [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
884 [IFLA_BRPORT_FAST_LEAVE] = { .type = NLA_U8 },
885 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
886 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
887 [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
888 [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
889 [IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
890 [IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
891 [IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
892 [IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
893 [IFLA_BRPORT_VLAN_TUNNEL] = { .type = NLA_U8 },
894 [IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
895 [IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
896 [IFLA_BRPORT_ISOLATED] = { .type = NLA_U8 },
897 [IFLA_BRPORT_LOCKED] = { .type = NLA_U8 },
898 [IFLA_BRPORT_MAB] = { .type = NLA_U8 },
899 [IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 },
900 [IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT] = { .type = NLA_U32 },
901 [IFLA_BRPORT_MCAST_N_GROUPS] = { .type = NLA_REJECT },
902 [IFLA_BRPORT_MCAST_MAX_GROUPS] = { .type = NLA_U32 },
903 [IFLA_BRPORT_NEIGH_VLAN_SUPPRESS] = NLA_POLICY_MAX(NLA_U8, 1),
904 [IFLA_BRPORT_BACKUP_NHID] = { .type = NLA_U32 },
905 };
906
907 /* Change the state of the port and notify spanning tree */
908 static int br_set_port_state(struct net_bridge_port *p, u8 state)
909 {
910 if (state > BR_STATE_BLOCKING)
911 return -EINVAL;
912
913 /* if kernel STP is running, don't allow changes */
914 if (p->br->stp_enabled == BR_KERNEL_STP)
915 return -EBUSY;
916
917 /* if device is not up, change is not allowed
918 * if link is not present, only allowable state is disabled
919 */
920 if (!netif_running(p->dev) ||
921 (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
922 return -ENETDOWN;
923
924 br_set_state(p, state);
925 br_port_state_selection(p->br);
926 return 0;
927 }
928
929 /* Set/clear port flags based on attribute */
930 static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
931 int attrtype, unsigned long mask)
932 {
933 if (!tb[attrtype])
934 return;
935
936 if (nla_get_u8(tb[attrtype]))
937 p->flags |= mask;
938 else
939 p->flags &= ~mask;
940 }
941
942 /* Process bridge protocol info on port */
943 static int br_setport(struct net_bridge_port *p, struct nlattr *tb[],
944 struct netlink_ext_ack *extack)
945 {
946 unsigned long old_flags, changed_mask;
947 bool br_vlan_tunnel_old;
948 int err;
949
950 old_flags = p->flags;
951 br_vlan_tunnel_old = (old_flags & BR_VLAN_TUNNEL) ? true : false;
952
953 br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
954 br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
955 br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE,
956 BR_MULTICAST_FAST_LEAVE);
957 br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
958 br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
959 br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
960 br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
961 br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST,
962 BR_MULTICAST_TO_UNICAST);
963 br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
964 br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
965 br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
966 br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
967 br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS, BR_NEIGH_SUPPRESS);
968 br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
969 br_set_port_flag(p, tb, IFLA_BRPORT_LOCKED, BR_PORT_LOCKED);
970 br_set_port_flag(p, tb, IFLA_BRPORT_MAB, BR_PORT_MAB);
971 br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_VLAN_SUPPRESS,
972 BR_NEIGH_VLAN_SUPPRESS);
973
974 if ((p->flags & BR_PORT_MAB) &&
975 (!(p->flags & BR_PORT_LOCKED) || !(p->flags & BR_LEARNING))) {
976 NL_SET_ERR_MSG(extack, "Bridge port must be locked and have learning enabled when MAB is enabled");
977 p->flags = old_flags;
978 return -EINVAL;
979 } else if (!(p->flags & BR_PORT_MAB) && (old_flags & BR_PORT_MAB)) {
980 struct net_bridge_fdb_flush_desc desc = {
981 .flags = BIT(BR_FDB_LOCKED),
982 .flags_mask = BIT(BR_FDB_LOCKED),
983 .port_ifindex = p->dev->ifindex,
984 };
985
986 br_fdb_flush(p->br, &desc);
987 }
988
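	/* Offer the new flag set to switchdev drivers; if any of them
	 * rejects it, restore the old flags and fail the whole request.
	 */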
989 changed_mask = old_flags ^ p->flags;
990
991 err = br_switchdev_set_port_flag(p, p->flags, changed_mask, extack);
992 if (err) {
993 p->flags = old_flags;
994 return err;
995 }
996
997 if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
998 nbp_vlan_tunnel_info_flush(p);
999
1000 br_port_flags_change(p, changed_mask);
1001
1002 if (tb[IFLA_BRPORT_COST]) {
1003 err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
1004 if (err)
1005 return err;
1006 }
1007
1008 if (tb[IFLA_BRPORT_PRIORITY]) {
1009 err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
1010 if (err)
1011 return err;
1012 }
1013
1014 if (tb[IFLA_BRPORT_STATE]) {
1015 err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
1016 if (err)
1017 return err;
1018 }
1019
1020 if (tb[IFLA_BRPORT_FLUSH])
1021 br_fdb_delete_by_port(p->br, p, 0, 0);
1022
1023 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1024 if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
1025 u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);
1026
1027 err = br_multicast_set_port_router(&p->multicast_ctx,
1028 mcast_router);
1029 if (err)
1030 return err;
1031 }
1032
1033 if (tb[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT]) {
1034 u32 hlimit;
1035
1036 hlimit = nla_get_u32(tb[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT]);
1037 err = br_multicast_eht_set_hosts_limit(p, hlimit);
1038 if (err)
1039 return err;
1040 }
1041
1042 if (tb[IFLA_BRPORT_MCAST_MAX_GROUPS]) {
1043 u32 max_groups;
1044
1045 max_groups = nla_get_u32(tb[IFLA_BRPORT_MCAST_MAX_GROUPS]);
1046 br_multicast_ngroups_set_max(&p->multicast_ctx, max_groups);
1047 }
1048 #endif
1049
1050 if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
1051 u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);
1052
1053 if (fwd_mask & BR_GROUPFWD_MACPAUSE)
1054 return -EINVAL;
1055 p->group_fwd_mask = fwd_mask;
1056 }
1057
1058 if (tb[IFLA_BRPORT_BACKUP_PORT]) {
1059 struct net_device *backup_dev = NULL;
1060 u32 backup_ifindex;
1061
1062 backup_ifindex = nla_get_u32(tb[IFLA_BRPORT_BACKUP_PORT]);
1063 if (backup_ifindex) {
1064 backup_dev = __dev_get_by_index(dev_net(p->dev),
1065 backup_ifindex);
1066 if (!backup_dev)
1067 return -ENOENT;
1068 }
1069
1070 err = nbp_backup_change(p, backup_dev);
1071 if (err)
1072 return err;
1073 }
1074
1075 if (tb[IFLA_BRPORT_BACKUP_NHID]) {
1076 u32 backup_nhid = nla_get_u32(tb[IFLA_BRPORT_BACKUP_NHID]);
1077
1078 WRITE_ONCE(p->backup_nhid, backup_nhid);
1079 }
1080
1081 return 0;
1082 }
1083
1084 /* Change state and parameters on port. */
1085 int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags,
1086 struct netlink_ext_ack *extack)
1087 {
1088 struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
1089 struct nlattr *tb[IFLA_BRPORT_MAX + 1];
1090 struct net_bridge_port *p;
1091 struct nlattr *protinfo;
1092 struct nlattr *afspec;
1093 bool changed = false;
1094 int err = 0;
1095
1096 protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
1097 afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
1098 if (!protinfo && !afspec)
1099 return 0;
1100
1101 p = br_port_get_rtnl(dev);
1102 /* We want to accept dev as bridge itself if the AF_SPEC
1103 * is set to see if someone is setting vlan info on the bridge
1104 */
1105 if (!p && !afspec)
1106 return -EINVAL;
1107
1108 if (p && protinfo) {
1109 if (protinfo->nla_type & NLA_F_NESTED) {
1110 err = nla_parse_nested_deprecated(tb, IFLA_BRPORT_MAX,
1111 protinfo,
1112 br_port_policy,
1113 NULL);
1114 if (err)
1115 return err;
1116
1117 spin_lock_bh(&p->br->lock);
1118 err = br_setport(p, tb, extack);
1119 spin_unlock_bh(&p->br->lock);
1120 } else {
1121 /* Binary compatibility with old RSTP */
1122 if (nla_len(protinfo) < sizeof(u8))
1123 return -EINVAL;
1124
1125 spin_lock_bh(&p->br->lock);
1126 err = br_set_port_state(p, nla_get_u8(protinfo));
1127 spin_unlock_bh(&p->br->lock);
1128 }
1129 if (err)
1130 goto out;
1131 changed = true;
1132 }
1133
1134 if (afspec)
1135 err = br_afspec(br, p, afspec, RTM_SETLINK, &changed, extack);
1136
1137 if (changed)
1138 br_ifinfo_notify(RTM_NEWLINK, br, p);
1139 out:
1140 return err;
1141 }
1142
1143 /* Delete port information */
1144 int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
1145 {
1146 struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
1147 struct net_bridge_port *p;
1148 struct nlattr *afspec;
1149 bool changed = false;
1150 int err = 0;
1151
1152 afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
1153 if (!afspec)
1154 return 0;
1155
1156 p = br_port_get_rtnl(dev);
1157 /* We want to accept dev as bridge itself as well */
1158 if (!p && !netif_is_bridge_master(dev))
1159 return -EINVAL;
1160
1161 err = br_afspec(br, p, afspec, RTM_DELLINK, &changed, NULL);
1162 if (changed)
1163 /* Send RTM_NEWLINK because userspace
1164 * expects RTM_NEWLINK for vlan dels
1165 */
1166 br_ifinfo_notify(RTM_NEWLINK, br, p);
1167
1168 return err;
1169 }
1170
1171 static int br_validate(struct nlattr *tb[], struct nlattr *data[],
1172 struct netlink_ext_ack *extack)
1173 {
1174 if (tb[IFLA_ADDRESS]) {
1175 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1176 return -EINVAL;
1177 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1178 return -EADDRNOTAVAIL;
1179 }
1180
1181 if (!data)
1182 return 0;
1183
1184 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
1185 if (data[IFLA_BR_VLAN_PROTOCOL] &&
1186 !eth_type_vlan(nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])))
1187 return -EPROTONOSUPPORT;
1188
1189 if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
1190 __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
1191
1192 if (defpvid >= VLAN_VID_MASK)
1193 return -EINVAL;
1194 }
1195 #endif
1196
1197 return 0;
1198 }
1199
1200 static int br_port_slave_changelink(struct net_device *brdev,
1201 struct net_device *dev,
1202 struct nlattr *tb[],
1203 struct nlattr *data[],
1204 struct netlink_ext_ack *extack)
1205 {
1206 struct net_bridge *br = netdev_priv(brdev);
1207 int ret;
1208
1209 if (!data)
1210 return 0;
1211
1212 spin_lock_bh(&br->lock);
1213 ret = br_setport(br_port_get_rtnl(dev), data, extack);
1214 spin_unlock_bh(&br->lock);
1215
1216 return ret;
1217 }
1218
1219 static int br_port_fill_slave_info(struct sk_buff *skb,
1220 const struct net_device *brdev,
1221 const struct net_device *dev)
1222 {
1223 return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
1224 }
1225
1226 static size_t br_port_get_slave_size(const struct net_device *brdev,
1227 const struct net_device *dev)
1228 {
1229 return br_port_info_size();
1230 }
1231
1232 static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
1233 [IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 },
1234 [IFLA_BR_HELLO_TIME] = { .type = NLA_U32 },
1235 [IFLA_BR_MAX_AGE] = { .type = NLA_U32 },
1236 [IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
1237 [IFLA_BR_STP_STATE] = { .type = NLA_U32 },
1238 [IFLA_BR_PRIORITY] = { .type = NLA_U16 },
1239 [IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
1240 [IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
1241 [IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
1242 [IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
1243 .len = ETH_ALEN },
1244 [IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
1245 [IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
1246 [IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
1247 [IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
1248 [IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
1249 [IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
1250 [IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
1251 [IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
1252 [IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
1253 [IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
1254 [IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
1255 [IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
1256 [IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
1257 [IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
1258 [IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
1259 [IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
1260 [IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
1261 [IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
1262 [IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
1263 [IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
1264 [IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
1265 [IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
1266 [IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 },
1267 [IFLA_BR_MULTI_BOOLOPT] =
1268 NLA_POLICY_EXACT_LEN(sizeof(struct br_boolopt_multi)),
1269 };
1270
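/*
 * Apply IFLA_BR_* attributes to an existing bridge (called under RTNL).
 * Attributes are processed in order and a failure returns immediately,
 * leaving any earlier changes in place.
 */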
1271 static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
1272 struct nlattr *data[],
1273 struct netlink_ext_ack *extack)
1274 {
1275 struct net_bridge *br = netdev_priv(brdev);
1276 int err;
1277
1278 if (!data)
1279 return 0;
1280
1281 if (data[IFLA_BR_FORWARD_DELAY]) {
1282 err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
1283 if (err)
1284 return err;
1285 }
1286
1287 if (data[IFLA_BR_HELLO_TIME]) {
1288 err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
1289 if (err)
1290 return err;
1291 }
1292
1293 if (data[IFLA_BR_MAX_AGE]) {
1294 err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
1295 if (err)
1296 return err;
1297 }
1298
1299 if (data[IFLA_BR_AGEING_TIME]) {
1300 err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
1301 if (err)
1302 return err;
1303 }
1304
1305 if (data[IFLA_BR_STP_STATE]) {
1306 u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);
1307
1308 err = br_stp_set_enabled(br, stp_enabled, extack);
1309 if (err)
1310 return err;
1311 }
1312
1313 if (data[IFLA_BR_PRIORITY]) {
1314 u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);
1315
1316 br_stp_set_bridge_priority(br, priority);
1317 }
1318
1319 if (data[IFLA_BR_VLAN_FILTERING]) {
1320 u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);
1321
1322 err = br_vlan_filter_toggle(br, vlan_filter, extack);
1323 if (err)
1324 return err;
1325 }
1326
1327 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
1328 if (data[IFLA_BR_VLAN_PROTOCOL]) {
1329 __be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);
1330
1331 err = __br_vlan_set_proto(br, vlan_proto, extack);
1332 if (err)
1333 return err;
1334 }
1335
1336 if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
1337 __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
1338
1339 err = __br_vlan_set_default_pvid(br, defpvid, extack);
1340 if (err)
1341 return err;
1342 }
1343
1344 if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
1345 __u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);
1346
1347 err = br_vlan_set_stats(br, vlan_stats);
1348 if (err)
1349 return err;
1350 }
1351
1352 if (data[IFLA_BR_VLAN_STATS_PER_PORT]) {
1353 __u8 per_port = nla_get_u8(data[IFLA_BR_VLAN_STATS_PER_PORT]);
1354
1355 err = br_vlan_set_stats_per_port(br, per_port);
1356 if (err)
1357 return err;
1358 }
1359 #endif
1360
1361 if (data[IFLA_BR_GROUP_FWD_MASK]) {
1362 u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);
1363
1364 if (fwd_mask & BR_GROUPFWD_RESTRICTED)
1365 return -EINVAL;
1366 br->group_fwd_mask = fwd_mask;
1367 }
1368
1369 if (data[IFLA_BR_GROUP_ADDR]) {
1370 u8 new_addr[ETH_ALEN];
1371
1372 if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
1373 return -EINVAL;
1374 memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
1375 if (!is_link_local_ether_addr(new_addr))
1376 return -EINVAL;
1377 if (new_addr[5] == 1 || /* 802.3x Pause address */
1378 new_addr[5] == 2 || /* 802.3ad Slow protocols */
1379 new_addr[5] == 3) /* 802.1X PAE address */
1380 return -EINVAL;
1381 spin_lock_bh(&br->lock);
1382 memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
1383 spin_unlock_bh(&br->lock);
1384 br_opt_toggle(br, BROPT_GROUP_ADDR_SET, true);
1385 br_recalculate_fwd_mask(br);
1386 }
1387
1388 if (data[IFLA_BR_FDB_FLUSH]) {
1389 struct net_bridge_fdb_flush_desc desc = {
1390 .flags_mask = BIT(BR_FDB_STATIC)
1391 };
1392
1393 br_fdb_flush(br, &desc);
1394 }
1395
1396 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1397 if (data[IFLA_BR_MCAST_ROUTER]) {
1398 u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);
1399
1400 err = br_multicast_set_router(&br->multicast_ctx,
1401 multicast_router);
1402 if (err)
1403 return err;
1404 }
1405
1406 if (data[IFLA_BR_MCAST_SNOOPING]) {
1407 u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
1408
1409 err = br_multicast_toggle(br, mcast_snooping, extack);
1410 if (err)
1411 return err;
1412 }
1413
1414 if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
1415 u8 val;
1416
1417 val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
1418 br_opt_toggle(br, BROPT_MULTICAST_QUERY_USE_IFADDR, !!val);
1419 }
1420
1421 if (data[IFLA_BR_MCAST_QUERIER]) {
1422 u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);
1423
1424 err = br_multicast_set_querier(&br->multicast_ctx,
1425 mcast_querier);
1426 if (err)
1427 return err;
1428 }
1429
1430 if (data[IFLA_BR_MCAST_HASH_ELASTICITY])
1431 br_warn(br, "the hash_elasticity option has been deprecated and is always %u\n",
1432 RHT_ELASTICITY);
1433
1434 if (data[IFLA_BR_MCAST_HASH_MAX])
1435 br->hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);
1436
1437 if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
1438 u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);
1439
1440 br->multicast_ctx.multicast_last_member_count = val;
1441 }
1442
1443 if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
1444 u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);
1445
1446 br->multicast_ctx.multicast_startup_query_count = val;
1447 }
1448
1449 if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
1450 u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);
1451
1452 br->multicast_ctx.multicast_last_member_interval = clock_t_to_jiffies(val);
1453 }
1454
1455 if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
1456 u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);
1457
1458 br->multicast_ctx.multicast_membership_interval = clock_t_to_jiffies(val);
1459 }
1460
1461 if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
1462 u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);
1463
1464 br->multicast_ctx.multicast_querier_interval = clock_t_to_jiffies(val);
1465 }
1466
1467 if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
1468 u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
1469
1470 br_multicast_set_query_intvl(&br->multicast_ctx, val);
1471 }
1472
1473 if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
1474 u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);
1475
1476 br->multicast_ctx.multicast_query_response_interval = clock_t_to_jiffies(val);
1477 }
1478
1479 if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
1480 u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
1481
1482 br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
1483 }
1484
1485 if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
1486 __u8 mcast_stats;
1487
1488 mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
1489 br_opt_toggle(br, BROPT_MULTICAST_STATS_ENABLED, !!mcast_stats);
1490 }
1491
1492 if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
1493 __u8 igmp_version;
1494
1495 igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
1496 err = br_multicast_set_igmp_version(&br->multicast_ctx,
1497 igmp_version);
1498 if (err)
1499 return err;
1500 }
1501
1502 #if IS_ENABLED(CONFIG_IPV6)
1503 if (data[IFLA_BR_MCAST_MLD_VERSION]) {
1504 __u8 mld_version;
1505
1506 mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
1507 err = br_multicast_set_mld_version(&br->multicast_ctx,
1508 mld_version);
1509 if (err)
1510 return err;
1511 }
1512 #endif
1513 #endif
1514 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1515 if (data[IFLA_BR_NF_CALL_IPTABLES]) {
1516 u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);
1517
1518 br_opt_toggle(br, BROPT_NF_CALL_IPTABLES, !!val);
1519 }
1520
1521 if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
1522 u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);
1523
1524 br_opt_toggle(br, BROPT_NF_CALL_IP6TABLES, !!val);
1525 }
1526
1527 if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
1528 u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);
1529
1530 br_opt_toggle(br, BROPT_NF_CALL_ARPTABLES, !!val);
1531 }
1532 #endif
1533
1534 if (data[IFLA_BR_MULTI_BOOLOPT]) {
1535 struct br_boolopt_multi *bm;
1536
1537 bm = nla_data(data[IFLA_BR_MULTI_BOOLOPT]);
1538 err = br_boolopt_multi_toggle(br, bm, extack);
1539 if (err)
1540 return err;
1541 }
1542
1543 return 0;
1544 }
1545
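/*
 * newlink handler: register the bridge netdevice first, then apply the
 * IFLA_ADDRESS and IFLA_BR_* attributes via br_changelink(); on failure
 * the half-created device is torn down again with br_dev_delete().
 */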
1546 static int br_dev_newlink(struct net *src_net, struct net_device *dev,
1547 struct nlattr *tb[], struct nlattr *data[],
1548 struct netlink_ext_ack *extack)
1549 {
1550 struct net_bridge *br = netdev_priv(dev);
1551 int err;
1552
1553 err = register_netdevice(dev);
1554 if (err)
1555 return err;
1556
1557 if (tb[IFLA_ADDRESS]) {
1558 spin_lock_bh(&br->lock);
1559 br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
1560 spin_unlock_bh(&br->lock);
1561 }
1562
1563 err = br_changelink(dev, tb, data, extack);
1564 if (err)
1565 br_dev_delete(dev, NULL);
1566
1567 return err;
1568 }
1569
1570 static size_t br_get_size(const struct net_device *brdev)
1571 {
1572 return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */
1573 nla_total_size(sizeof(u32)) + /* IFLA_BR_HELLO_TIME */
1574 nla_total_size(sizeof(u32)) + /* IFLA_BR_MAX_AGE */
1575 nla_total_size(sizeof(u32)) + /* IFLA_BR_AGEING_TIME */
1576 nla_total_size(sizeof(u32)) + /* IFLA_BR_STP_STATE */
1577 nla_total_size(sizeof(u16)) + /* IFLA_BR_PRIORITY */
1578 nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_FILTERING */
1579 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
1580 nla_total_size(sizeof(__be16)) + /* IFLA_BR_VLAN_PROTOCOL */
1581 nla_total_size(sizeof(u16)) + /* IFLA_BR_VLAN_DEFAULT_PVID */
1582 nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_STATS_ENABLED */
1583 nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_STATS_PER_PORT */
1584 #endif
1585 nla_total_size(sizeof(u16)) + /* IFLA_BR_GROUP_FWD_MASK */
1586 nla_total_size(sizeof(struct ifla_bridge_id)) + /* IFLA_BR_ROOT_ID */
1587 nla_total_size(sizeof(struct ifla_bridge_id)) + /* IFLA_BR_BRIDGE_ID */
1588 nla_total_size(sizeof(u16)) + /* IFLA_BR_ROOT_PORT */
1589 nla_total_size(sizeof(u32)) + /* IFLA_BR_ROOT_PATH_COST */
1590 nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE */
1591 nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
1592 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
1593 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
1594 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
1595 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
1596 nla_total_size(ETH_ALEN) + /* IFLA_BR_GROUP_ADDR */
1597 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1598 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_ROUTER */
1599 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_SNOOPING */
1600 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_QUERY_USE_IFADDR */
1601 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_QUERIER */
1602 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_STATS_ENABLED */
1603 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_ELASTICITY */
1604 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_MAX */
1605 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
1606 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
1607 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
1608 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
1609 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
1610 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
1611 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
1612 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
1613 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_IGMP_VERSION */
1614 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_MLD_VERSION */
1615 br_multicast_querier_state_size() + /* IFLA_BR_MCAST_QUERIER_STATE */
1616 #endif
1617 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1618 nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IPTABLES */
1619 nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IP6TABLES */
1620 nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_ARPTABLES */
1621 #endif
1622 nla_total_size(sizeof(struct br_boolopt_multi)) + /* IFLA_BR_MULTI_BOOLOPT */
1623 0;
1624 }
1625
1626 static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
1627 {
1628 struct net_bridge *br = netdev_priv(brdev);
1629 u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
1630 u32 hello_time = jiffies_to_clock_t(br->hello_time);
1631 u32 age_time = jiffies_to_clock_t(br->max_age);
1632 u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
1633 u32 stp_enabled = br->stp_enabled;
1634 u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
1635 u8 vlan_enabled = br_vlan_enabled(br->dev);
1636 struct br_boolopt_multi bm;
1637 u64 clockval;
1638
1639 clockval = br_timer_value(&br->hello_timer);
1640 if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
1641 return -EMSGSIZE;
1642 clockval = br_timer_value(&br->tcn_timer);
1643 if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
1644 return -EMSGSIZE;
1645 clockval = br_timer_value(&br->topology_change_timer);
1646 if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
1647 IFLA_BR_PAD))
1648 return -EMSGSIZE;
1649 clockval = br_timer_value(&br->gc_work.timer);
1650 if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
1651 return -EMSGSIZE;
1652
1653 br_boolopt_multi_get(br, &bm);
1654 if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
1655 nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
1656 nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
1657 nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
1658 nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
1659 nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
1660 nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
1661 nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
1662 nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
1663 &br->bridge_id) ||
1664 nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
1665 &br->designated_root) ||
1666 nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
1667 nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
1668 nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
1669 nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
1670 br->topology_change_detected) ||
1671 nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr) ||
1672 nla_put(skb, IFLA_BR_MULTI_BOOLOPT, sizeof(bm), &bm))
1673 return -EMSGSIZE;
1674
1675 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
1676 if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
1677 nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
1678 nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
1679 br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
1680 nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
1681 br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
1682 return -EMSGSIZE;
1683 #endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER,
		       br->multicast_ctx.multicast_router) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING,
		       br_opt_get(br, BROPT_MULTICAST_ENABLED)) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
		       br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR)) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER,
		       br->multicast_ctx.multicast_querier) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
		       br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, RHT_ELASTICITY) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
			br->multicast_ctx.multicast_last_member_count) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
			br->multicast_ctx.multicast_startup_query_count) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
		       br->multicast_ctx.multicast_igmp_version) ||
	    br_multicast_dump_querier_state(skb, &br->multicast_ctx,
					    IFLA_BR_MCAST_QUERIER_STATE))
		return -EMSGSIZE;
#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
		       br->multicast_ctx.multicast_mld_version))
		return -EMSGSIZE;
#endif
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_membership_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_querier_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
		       br_opt_get(br, BROPT_NF_CALL_IPTABLES) ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
		       br_opt_get(br, BROPT_NF_CALL_IP6TABLES) ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
		       br_opt_get(br, BROPT_NF_CALL_ARPTABLES) ? 1 : 0))
		return -EMSGSIZE;
#endif

	return 0;
}

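/* Upper bound for the LINK_XSTATS payload: one bridge_vlan_xstats entry per
 * VLAN (including placeholder entries), plus multicast stats and, for a
 * bridge port, the STP counters.
 */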
static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int numvls = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		vg = nbp_vlan_group(p);
		break;
	default:
		return 0;
	}

	if (vg) {
		/* we need to count all, even placeholder entries */
		list_for_each_entry(v, &vg->vlan_list, vlist)
			numvls++;
	}

	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
	       nla_total_size_64bit(sizeof(struct br_mcast_stats)) +
	       (p ? nla_total_size_64bit(sizeof(p->stp_xstats)) : 0) +
	       nla_total_size(0);
}

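/* Dump extended statistics for the bridge itself or one of its ports.
 * *prividx records how far a previous, partial dump got so that the next
 * pass can resume from that point.
 */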
static int br_fill_linkxstats(struct sk_buff *skb,
			      const struct net_device *dev,
			      int *prividx, int attr)
{
	struct nlattr *nla __maybe_unused;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	struct nlattr *nest;
	int vl_idx = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return -EINVAL;
	}

	nest = nla_nest_start_noflag(skb, LINK_XSTATS_TYPE_BRIDGE);
	if (!nest)
		return -EMSGSIZE;

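	/* Per-VLAN RX/TX counters; entries before *prividx were already
	 * dumped and are skipped.
	 */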
	if (vg) {
		u16 pvid;

		pvid = br_get_pvid(vg);
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			struct bridge_vlan_xstats vxi;
			struct pcpu_sw_netstats stats;

			if (++vl_idx < *prividx)
				continue;
			memset(&vxi, 0, sizeof(vxi));
			vxi.vid = v->vid;
			vxi.flags = v->flags;
			if (v->vid == pvid)
				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
			br_vlan_get_stats(v, &stats);
			vxi.rx_bytes = u64_stats_read(&stats.rx_bytes);
			vxi.rx_packets = u64_stats_read(&stats.rx_packets);
			vxi.tx_bytes = u64_stats_read(&stats.tx_bytes);
			vxi.tx_packets = u64_stats_read(&stats.tx_packets);

			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
				goto nla_put_failure;
		}
	}

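	/* Multicast counters, aggregated for the bridge or for this port. */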
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (++vl_idx >= *prividx) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
					sizeof(struct br_mcast_stats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;
		br_multicast_get_stats(br, p, nla_data(nla));
	}
#endif

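	/* Per-port STP counters, copied under the bridge lock. */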
	if (p) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_STP,
					sizeof(p->stp_xstats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;

		spin_lock_bh(&br->lock);
		memcpy(nla_data(nla), &p->stp_xstats, sizeof(p->stp_xstats));
		spin_unlock_bh(&br->lock);
	}

	nla_nest_end(skb, nest);
	*prividx = 0;

	return 0;

nla_put_failure:
	nla_nest_end(skb, nest);
	*prividx = vl_idx;

	return -EMSGSIZE;
}

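/* AF_BRIDGE address-family ops; only the per-link af_spec size callback is
 * wired up here.
 */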
static struct rtnl_af_ops br_af_ops __read_mostly = {
	.family = AF_BRIDGE,
	.get_link_af_size = br_get_link_af_size_filtered,
};

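/* rtnl_link_ops for the "bridge" link kind, covering both the bridge device
 * itself and its slave (port) attributes.
 */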
struct rtnl_link_ops br_link_ops __read_mostly = {
	.kind = "bridge",
	.priv_size = sizeof(struct net_bridge),
	.setup = br_dev_setup,
	.maxtype = IFLA_BR_MAX,
	.policy = br_policy,
	.validate = br_validate,
	.newlink = br_dev_newlink,
	.changelink = br_changelink,
	.dellink = br_dev_delete,
	.get_size = br_get_size,
	.fill_info = br_fill_info,
	.fill_linkxstats = br_fill_linkxstats,
	.get_linkxstats_size = br_get_linkxstats_size,

	.slave_maxtype = IFLA_BRPORT_MAX,
	.slave_policy = br_port_policy,
	.slave_changelink = br_port_slave_changelink,
	.get_slave_size = br_port_get_slave_size,
	.fill_slave_info = br_port_fill_slave_info,
};

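/* Register the VLAN rtnl handlers, the AF_BRIDGE af_ops and the bridge
 * link ops; unwind on failure.
 */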
int __init br_netlink_init(void)
{
	int err;

	err = br_vlan_rtnl_init();
	if (err)
		goto out;

	rtnl_af_register(&br_af_ops);

	err = rtnl_link_register(&br_link_ops);
	if (err)
		goto out_af;

	return 0;

out_af:
	rtnl_af_unregister(&br_af_ops);
out:
	return err;
}

void br_netlink_fini(void)
{
	br_vlan_rtnl_uninit();
	rtnl_af_unregister(&br_af_ops);
	rtnl_link_unregister(&br_link_ops);
}