xref: /openbmc/linux/net/switchdev/switchdev.c (revision 36db6e8484ed455bbb320d89a119378897ae991c)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static bool switchdev_obj_eq(const struct switchdev_obj *a,
			     const struct switchdev_obj *b)
{
	const struct switchdev_obj_port_vlan *va, *vb;
	const struct switchdev_obj_port_mdb *ma, *mb;

	if (a->id != b->id || a->orig_dev != b->orig_dev)
		return false;

	switch (a->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		va = SWITCHDEV_OBJ_PORT_VLAN(a);
		vb = SWITCHDEV_OBJ_PORT_VLAN(b);
		return va->flags == vb->flags &&
			va->vid == vb->vid &&
			va->changed == vb->changed;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		ma = SWITCHDEV_OBJ_PORT_MDB(a);
		mb = SWITCHDEV_OBJ_PORT_MDB(b);
		return ma->vid == mb->vid &&
			ether_addr_equal(ma->addr, mb->addr);
	default:
		break;
	}

	BUG();
}

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 *	switchdev_deferred_process - Process ops in deferred queue
 *
 *	Called to flush the ops currently queued on the deferred ops queue.
 *	rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		netdev_put(dfitem->dev, &dfitem->dev_tracker);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(struct_size(dfitem, data, data_len), GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	netdev_hold(dev, &dfitem->dev_tracker, GFP_ATOMIC);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *	@extack: netlink extended ack, for error message propagation
 *
 *	Unless the SWITCHDEV_F_DEFER flag is set in @attr->flags, rtnl_lock
 *	must be held and the caller must not be in an atomic section.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
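
/* Example (illustrative sketch, not part of this file): the deferred path
 * lets callers in atomic context request an attribute set; this is roughly
 * what the bridge does when changing a port's STP state. The attr is copied
 * by value into the deferred item, so it may live on the caller's stack:
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = brport_dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = switchdev_port_attr_set(brport_dev, &attr, NULL);
 */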

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *	@extack: netlink extended ack
 *
 *	Unless the SWITCHDEV_F_DEFER flag is set in @obj->flags, rtnl_lock
 *	must be held and the caller must not be in an atomic section.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
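
/* Example (illustrative sketch): adding a port VLAN object from process
 * context, roughly what the bridge driver does when a VLAN is configured
 * on an offloaded port; the vid and flags here are hypothetical:
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = brport_dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid = 10,
 *	};
 *
 *	err = switchdev_port_obj_add(brport_dev, &vlan.obj, extack);
 */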

static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	Unless the SWITCHDEV_F_DEFER flag is set in @obj->flags, rtnl_lock
 *	must be held and the caller must not be in an atomic section.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

/**
 *	switchdev_port_obj_act_is_deferred - Is object action pending?
 *
 *	@dev: port device
 *	@nt: type of action; add or delete
 *	@obj: object to test
 *
 *	Returns true if a deferred item equivalent to performing action
 *	@nt on object @obj is pending on @dev.
 *
 *	rtnl_lock must be held.
 */
bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
					enum switchdev_notifier_type nt,
					const struct switchdev_obj *obj)
{
	struct switchdev_deferred_item *dfitem;
	bool found = false;

	ASSERT_RTNL();

	spin_lock_bh(&deferred_lock);

	list_for_each_entry(dfitem, &deferred, list) {
		if (dfitem->dev != dev)
			continue;

		if ((dfitem->func == switchdev_port_obj_add_deferred &&
		     nt == SWITCHDEV_PORT_OBJ_ADD) ||
		    (dfitem->func == switchdev_port_obj_del_deferred &&
		     nt == SWITCHDEV_PORT_OBJ_DEL)) {
			if (switchdev_obj_eq((const void *)dfitem->data, obj)) {
				found = true;
				break;
			}
		}
	}

	spin_unlock_bh(&deferred_lock);

	return found;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static RAW_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);
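
/* Example (sketch): a switch driver registering for atomic switchdev
 * notifications, typically FDB add/del events; foo_switchdev_event is a
 * hypothetical driver callback:
 *
 *	static struct notifier_block foo_switchdev_nb = {
 *		.notifier_call = foo_switchdev_event,
 *	};
 *
 *	err = register_switchdev_notifier(&foo_switchdev_nb);
 */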

/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack
 *
 *	Call all switchdev notifier blocks on the atomic chain.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
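
/* Example (sketch): notifying an FDB entry towards offloading drivers,
 * roughly what the bridge does; mac and vid are hypothetical locals:
 *
 *	struct switchdev_notifier_fdb_info fdb_info = {
 *		.addr = mac,
 *		.vid = vid,
 *	};
 *
 *	call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE, dev,
 *				 &fdb_info.info, NULL);
 */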

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct raw_notifier_head *chain = &switchdev_blocking_notif_chain;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(chain, nb);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct raw_notifier_head *chain = &switchdev_blocking_notif_chain;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(chain, nb);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();
	info->dev = dev;
	info->extack = extack;
	return raw_notifier_call_chain(&switchdev_blocking_notif_chain,
				       val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

struct switchdev_nested_priv {
	bool (*check_cb)(const struct net_device *dev);
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	const struct net_device *dev;
	struct net_device *lower_dev;
};

static int switchdev_lower_dev_walk(struct net_device *lower_dev,
				    struct netdev_nested_priv *priv)
{
	struct switchdev_nested_priv *switchdev_priv = priv->data;
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	bool (*check_cb)(const struct net_device *dev);
	const struct net_device *dev;

	check_cb = switchdev_priv->check_cb;
	foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
	dev = switchdev_priv->dev;

	if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
		switchdev_priv->lower_dev = lower_dev;
		return 1;
	}

	return 0;
}

static struct net_device *
switchdev_lower_dev_find_rcu(struct net_device *dev,
			     bool (*check_cb)(const struct net_device *dev),
			     bool (*foreign_dev_check_cb)(const struct net_device *dev,
							  const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
			 bool (*check_cb)(const struct net_device *dev),
			 bool (*foreign_dev_check_cb)(const struct net_device *dev,
						      const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
		struct net_device *orig_dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge or a LAG device.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Do not propagate FDB entries across bridges */
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* Bridge ports might be either us, or LAG interfaces
		 * that we offload.
		 */
		if (!check_cb(lower_dev) &&
		    !switchdev_lower_dev_find_rcu(lower_dev, check_cb,
						  foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
							     event, fdb_info, check_cb,
							     foreign_dev_check_cb,
							     mod_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return 0;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
						      check_cb, foreign_dev_check_cb,
						      mod_cb);
}

int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
						     check_cb, foreign_dev_check_cb,
						     mod_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);
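
/* Example (sketch): a driver dispatching FDB events from its atomic
 * notifier callback; the foo_* callbacks are hypothetical:
 *
 *	switch (event) {
 *	case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *	case SWITCHDEV_FDB_DEL_TO_DEVICE:
 *		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
 *							   foo_dev_check,
 *							   foo_foreign_dev_check,
 *							   foo_fdb_event);
 *		return notifier_from_errno(err);
 *	}
 */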

static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct netlink_ext_ack *extack;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore unsupported
	 * devices, since another driver might be able to handle them, but
	 * propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, add_cb);
}

/* Pass through a port object addition, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      NULL, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
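
/* Example (sketch): a driver's blocking notifier handling object additions
 * via this helper; the foo_* names are hypothetical:
 *
 *	case SWITCHDEV_PORT_OBJ_ADD:
 *		err = switchdev_handle_port_obj_add(dev, ptr,
 *						    foo_dev_check,
 *						    foo_port_obj_add);
 *		return notifier_from_errno(err);
 */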

/* Same as switchdev_handle_port_obj_add(), except if the object is notified
 * on a @dev that passes @foreign_dev_check_cb, it is replicated towards all
 * devices that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign);

static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore unsupported
	 * devices, since another driver might be able to handle them, but
	 * propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, del_cb);
}

/* Pass through a port object deletion, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      NULL, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

/* Same as switchdev_handle_port_obj_del(), except if the object is notified
 * on a @dev that passes @foreign_dev_check_cb, it is replicated towards all
 * devices that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore unsupported
	 * devices, since another driver might be able to handle them, but
	 * propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
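
/* Example (sketch): dispatching attribute sets from a driver's blocking
 * notifier; the foo_* names are hypothetical:
 *
 *	case SWITCHDEV_PORT_ATTR_SET:
 *		err = switchdev_handle_port_attr_set(dev, ptr,
 *						     foo_dev_check,
 *						     foo_port_attr_set);
 *		return notifier_from_errno(err);
 */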

int switchdev_bridge_port_offload(struct net_device *brport_dev,
				  struct net_device *dev, const void *ctx,
				  struct notifier_block *atomic_nb,
				  struct notifier_block *blocking_nb,
				  bool tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
			.tx_fwd_offload = tx_fwd_offload,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);
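
/* Example (sketch): a port driver marking a bridge port as offloaded when
 * its netdev joins a bridge, typically from a NETDEV_CHANGEUPPER handler;
 * port and the foo_* notifier blocks are hypothetical:
 *
 *	err = switchdev_bridge_port_offload(brport_dev, port->dev, port,
 *					    &foo_switchdev_nb,
 *					    &foo_switchdev_blocking_nb,
 *					    false, extack);
 */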

void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
				     const void *ctx,
				     struct notifier_block *atomic_nb,
				     struct notifier_block *blocking_nb)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};

	ASSERT_RTNL();

	call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
					  brport_dev, &brport_info.info,
					  NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);

int switchdev_bridge_port_replay(struct net_device *brport_dev,
				 struct net_device *dev, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_REPLAY,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_replay);
974