// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"

static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;

	return ageing_time;
}

static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;

	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
		return -ERANGE;

	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
		return -ERANGE;

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}

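/* Illustrative sketch (not part of this file): a driver advertises the range
 * its hardware supports at setup time, and programs the value it receives
 * from its .set_ageing_time() callback. The register layout and helpers
 * below are hypothetical.
 *
 *	ds->ageing_time_min = 1000;	// 1 second, in ms
 *	ds->ageing_time_max = 3825000;	// ~64 minutes, in ms
 *
 *	static int foo_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
 *	{
 *		return foo_write(ds->priv, FOO_AGE_REG, msecs / FOO_AGE_STEP_MS);
 *	}
 */
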
static bool dsa_port_mtu_match(struct dsa_port *dp,
			       struct dsa_notifier_mtu_info *info)
{
	if (dp->ds->index == info->sw_index && dp->index == info->port)
		return true;

	/* Do not propagate to other switches in the tree if the notifier was
	 * targeted for a single switch.
	 */
	if (info->targeted_match)
		return false;

	if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
		return true;

	return false;
}

static int dsa_switch_mtu(struct dsa_switch *ds,
			  struct dsa_notifier_mtu_info *info)
{
	struct dsa_port *dp;
	int ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_mtu_match(dp, info)) {
			ret = ds->ops->port_change_mtu(ds, dp->index,
						       info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

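/* A hypothetical driver sketch of .port_change_mtu(): program the per-port
 * maximum frame length derived from the MTU. FOO_MAX_FRAME() and foo_write()
 * are made-up helpers, as is the choice of overhead.
 *
 *	static int foo_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
 *	{
 *		return foo_write(ds->priv, FOO_MAX_FRAME(port),
 *				 new_mtu + ETH_HLEN + ETH_FCS_LEN);
 *	}
 */
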
static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (dst->index == info->tree_index && ds->index == info->sw_index) {
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->port, info->bridge,
						&info->tx_fwd_offload,
						info->extack);
		if (err)
			return err;
	}

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
						     info->sw_index,
						     info->port, info->bridge,
						     info->extack);
		if (err)
			return err;
	}

	return 0;
}

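/* The join notifier reaches every switch in the tree: the switch that owns
 * the port programs it through .port_bridge_join(), while the other switches
 * may react through .crosschip_bridge_join() so that hardware forwarding
 * keeps working across chips (multi-chip fabrics such as mv88e6xxx implement
 * this pair of callbacks).
 */
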
static int dsa_switch_sync_vlan_filtering(struct dsa_switch *ds,
					  struct dsa_notifier_bridge_info *info)
{
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	bool vlan_filtering;
	struct dsa_port *dp;
	int err;

	if (ds->needs_standalone_vlan_filtering &&
	    !br_vlan_enabled(info->bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(info->bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing the vlan_filtering setting when slave ports leave
	 * it. That is a good thing, because it lets us handle the transition
	 * ourselves and also cover the case where the switch's vlan_filtering
	 * setting is global (not per port). In that case, the correct moment
	 * to trigger the vlan_filtering callback is only when the last port
	 * leaves the last VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(dp, ds) {
			struct net_device *br = dsa_port_bridge_dev_get(dp);

			if (br && br_vlan_enabled(br)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (change_vlan_filtering) {
		err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
					      vlan_filtering, &extack);
		if (extack._msg)
			dev_err(ds->dev, "port %d: %s\n", info->port,
				extack._msg);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}

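/* Example of the global vlan_filtering case: on a switch where VLAN awareness
 * is a chip-wide setting, if swp0 and swp1 sit in two different VLAN-aware
 * bridges and swp1 leaves its bridge, filtering must not be turned off yet,
 * because swp0 still depends on it. The loop above detects that situation and
 * suppresses the change until the last such port has left.
 */
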
static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (dst->index == info->tree_index && ds->index == info->sw_index &&
	    ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->port, info->bridge);

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->tree_index,
						info->sw_index, info->port,
						info->bridge);

	if (dst->index == info->tree_index && ds->index == info->sw_index) {
		err = dsa_switch_sync_vlan_filtering(ds, info);
		if (err)
			return err;
	}

	return 0;
}

/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 * DSA links) that sit between the targeted port on which the notifier was
 * emitted and its dedicated CPU port.
 */
static bool dsa_port_host_address_match(struct dsa_port *dp,
					int info_sw_index, int info_port)
{
	struct dsa_port *targeted_dp, *cpu_dp;
	struct dsa_switch *targeted_ds;

	targeted_ds = dsa_switch_find(dp->ds->dst->index, info_sw_index);
	targeted_dp = dsa_to_port(targeted_ds, info_port);
	cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
						     cpu_dp->index);

	return false;
}

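/* Worked example: in a daisy chain "CPU <-> sw0 <-> sw1", a host address
 * targeted at a user port of sw1 matches sw1's upstream DSA port and sw0's
 * CPU port, i.e. every hop a frame must take from that user port towards the
 * host. Programming the address on those ports is what lets it reach the CPU.
 */
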
static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
					      const unsigned char *addr, u16 vid,
					      struct dsa_db db)
{
	struct dsa_mac_addr *a;

	list_for_each_entry(a, addr_list, list)
		if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
		    dsa_db_equal(&a->db, &db))
			return a;

	return NULL;
}

static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_mdb_add(ds, port, mdb, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

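/* The refcounting pattern above (and in the FDB/VLAN equivalents below) keeps
 * shared CPU/DSA ports correct when several user ports need the same entry:
 *
 *	add(addr) -> hardware programmed, refcount = 1
 *	add(addr) -> refcount = 2, hardware untouched
 *	del(addr) -> refcount = 1, hardware untouched
 *	del(addr) -> refcount = 0, hardware entry deleted
 *
 * Only the first add and the last del reach the driver.
 */
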
static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_mdb_del(ds, port, mdb, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &lag->fdbs);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}

static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}

static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_fdb_add(dp, info->addr, info->vid,
						  info->db);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_fdb_del(dp, info->addr, info->vid,
						  info->db);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
}

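/* dsa_towards_port() resolves the local port that leads towards the targeted
 * (switch, port) pair. Example: with "sw0 <-> sw1" and an FDB entry targeted
 * at sw1 port 2, sw1 programs port 2 itself, while sw0 programs the DSA link
 * port facing sw1, so that frames for that address are forwarded downstream.
 */
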
static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_add)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_add(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}

static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_del)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_del(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}

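/* FDB entries on a LAG are tracked per struct dsa_lag rather than per port,
 * since the address belongs to the bond/team interface and not to any single
 * member. Each switch with at least one member port gets exactly one
 * refcounted copy, courtesy of the early return in the loops above.
 */
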
static int dsa_switch_lag_change(struct dsa_switch *ds,
				 struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_change)
		return ds->ops->port_lag_change(ds, info->port);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
		return ds->ops->crosschip_lag_change(ds, info->sw_index,
						     info->port);

	return 0;
}

static int dsa_switch_lag_join(struct dsa_switch *ds,
			       struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_join)
		return ds->ops->port_lag_join(ds, info->port, info->lag,
					      info->info);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
		return ds->ops->crosschip_lag_join(ds, info->sw_index,
						   info->port, info->lag,
						   info->info);

	return -EOPNOTSUPP;
}

static int dsa_switch_lag_leave(struct dsa_switch *ds,
				struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_leave)
		return ds->ops->port_lag_leave(ds, info->port, info->lag);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
		return ds->ops->crosschip_lag_leave(ds, info->sw_index,
						    info->port, info->lag);

	return -EOPNOTSUPP;
}

static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_add(dp, info->mdb, info->db);
}

static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_del(dp, info->mdb, info->db);
}

static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
			if (err)
				break;
		}
	}

	return err;
}

/* Port VLANs match on the targeted port and on all DSA ports */
static bool dsa_port_vlan_match(struct dsa_port *dp,
				struct dsa_notifier_vlan_info *info)
{
	if (dp->ds->index == info->sw_index && dp->index == info->port)
		return true;

	if (dsa_port_is_dsa(dp))
		return true;

	return false;
}

/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
 * (upstream and downstream) of that switch and its upstream switches.
 */
static bool dsa_port_host_vlan_match(struct dsa_port *dp,
				     struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *targeted_dp, *cpu_dp;
	struct dsa_switch *targeted_ds;

	targeted_ds = dsa_switch_find(dp->ds->dst->index, info->sw_index);
	targeted_dp = dsa_to_port(targeted_ds, info->port);
	cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
		return dsa_port_is_dsa(dp) || dp == cpu_dp;

	return false;
}

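/* The two scopes differ in what they make possible: a port VLAN installs the
 * VLAN on the user port (plus the DSA links, so tagged traffic can cross
 * chips), while a host VLAN installs it on the CPU path so that frames in
 * that VLAN can be terminated by, or sourced from, the host itself.
 */
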
static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_vlan *v;

	list_for_each_entry(v, vlan_list, list)
		if (v->vid == vlan->vid)
			return v;

	return NULL;
}

static int dsa_port_do_vlan_add(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports. */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_add(ds, port, vlan, extack);

	/* No need to propagate on shared ports the existing VLANs that were
	 * re-notified after just the flags have changed. This would cause a
	 * refcount bump which we need to avoid, since it unbalances the
	 * additions with the deletions.
	 */
	if (vlan->changed)
		return 0;

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (v) {
		refcount_inc(&v->refcount);
		goto out;
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
	if (err) {
		kfree(v);
		goto out;
	}

	v->vid = vlan->vid;
	refcount_set(&v->refcount, 1);
	list_add_tail(&v->list, &dp->vlans);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}

static int dsa_port_do_vlan_del(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_del(ds, port, vlan);

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (!v) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&v->refcount))
		goto out;

	err = ds->ops->port_vlan_del(ds, port, vlan);
	if (err) {
		refcount_set(&v->refcount, 1);
		goto out;
	}

	list_del(&v->list);
	kfree(v);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
						   tag_ops->proto);
		if (err)
			return err;

		dsa_port_set_tag_protocol(cpu_dp, tag_ops);
	}

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *slave = dp->slave;

		dsa_slave_setup_tagger(slave);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_slave_change_mtu(slave, slave->mtu);
	}

	return 0;
}

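/* This notifier is typically reached from user space via the DSA master's
 * sysfs attribute, with the ports down, along the lines of:
 *
 *	# cat /sys/class/net/eth0/dsa/tagging
 *	ocelot
 *	# echo ocelot-8021q > /sys/class/net/eth0/dsa/tagging
 *
 * "eth0" and the tagger names above are just an example setup.
 */
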
/* We use the same cross-chip notifiers to inform both the tagger side, as well
 * as the switch side, of connection and disconnection events.
 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
 * switch side doesn't support connecting to this tagger, and therefore, the
 * fact that we don't disconnect the tagger side doesn't constitute a memory
 * leak: the tagger will still operate with persistent per-switch memory, just
 * with the switch side unconnected to it. What does constitute a hard error is
 * when the switch side supports connecting but fails.
 */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
			     struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int err;

	/* Notify the new tagger about the connection to this switch */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (!ds->ops->connect_tag_protocol)
		return -EOPNOTSUPP;

	/* Notify the switch about the connection to the new tagger */
	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
	if (err) {
		/* Revert the new tagger's connection to this tree */
		if (tag_ops->disconnect)
			tag_ops->disconnect(ds);
		return err;
	}

	return 0;
}

static int
dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
				struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;

	/* Notify the tagger about the disconnection from this switch */
	if (tag_ops->disconnect && ds->tagger_data)
		tag_ops->disconnect(ds);

	/* No need to notify the switch, since it shouldn't have any
	 * resources to tear down
	 */
	return 0;
}

static int
dsa_switch_master_state_change(struct dsa_switch *ds,
			       struct dsa_notifier_master_state_info *info)
{
	if (!ds->ops->master_state_change)
		return 0;

	ds->ops->master_state_change(ds, info->master, info->operational);

	return 0;
}

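/* Drivers can use .master_state_change() to learn whether the DSA master is
 * able to pass traffic (up and running), for example to decide when trapping
 * frames towards the CPU is useful. The callback is informational only,
 * hence the unconditional success here.
 */
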
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_ADD:
		err = dsa_switch_lag_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_DEL:
		err = dsa_switch_lag_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_ADD:
		err = dsa_switch_host_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_DEL:
		err = dsa_switch_host_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
		err = dsa_switch_connect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
		err = dsa_switch_disconnect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MASTER_STATE_CHANGE:
		err = dsa_switch_master_state_change(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}

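/* Fan-out sketch: every dsa_switch registers this block on the tree's raw
 * notifier chain, so one event emitted through dsa_tree_notify() runs
 * dsa_switch_event() once per switch. Changing the MTU of a user port, for
 * instance, boils down to something like:
 *
 *	struct dsa_notifier_mtu_info info = { ... };
 *
 *	dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
 *
 * which ends up in dsa_switch_mtu() on each chip of the fabric.
 */
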
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}