xref: /openbmc/linux/net/dsa/port.c (revision 0ca8d3ca4561535f97b31e7b8de569c69bc3b27b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Handling of a single switch port
4  *
5  * Copyright (c) 2017 Savoir-faire Linux Inc.
6  *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
7  */
8 
9 #include <linux/if_bridge.h>
10 #include <linux/notifier.h>
11 #include <linux/of_mdio.h>
12 #include <linux/of_net.h>
13 
14 #include "dsa_priv.h"
15 
/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 *
 * Return: 0 on success, or a negative error code propagated from the
 * tree-wide notifier chain.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}
32 
33 int dsa_port_set_state(struct dsa_port *dp, u8 state)
34 {
35 	struct dsa_switch *ds = dp->ds;
36 	int port = dp->index;
37 
38 	if (!ds->ops->port_stp_state_set)
39 		return -EOPNOTSUPP;
40 
41 	ds->ops->port_stp_state_set(ds, port, state);
42 
43 	if (ds->ops->port_fast_age) {
44 		/* Fast age FDB entries or flush appropriate forwarding database
45 		 * for the given port, if we are moving it from Learning or
46 		 * Forwarding state, to Disabled or Blocking or Listening state.
47 		 */
48 
49 		if ((dp->stp_state == BR_STATE_LEARNING ||
50 		     dp->stp_state == BR_STATE_FORWARDING) &&
51 		    (state == BR_STATE_DISABLED ||
52 		     state == BR_STATE_BLOCKING ||
53 		     state == BR_STATE_LISTENING))
54 			ds->ops->port_fast_age(ds, port);
55 	}
56 
57 	dp->stp_state = state;
58 
59 	return 0;
60 }
61 
62 static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
63 {
64 	int err;
65 
66 	err = dsa_port_set_state(dp, state);
67 	if (err)
68 		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
69 }
70 
71 int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
72 {
73 	struct dsa_switch *ds = dp->ds;
74 	int port = dp->index;
75 	int err;
76 
77 	if (ds->ops->port_enable) {
78 		err = ds->ops->port_enable(ds, port, phy);
79 		if (err)
80 			return err;
81 	}
82 
83 	if (!dp->bridge_dev)
84 		dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
85 
86 	if (dp->pl)
87 		phylink_start(dp->pl);
88 
89 	return 0;
90 }
91 
/* Same as dsa_port_enable_rt(), for callers that do not hold rtnl_lock */
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int ret;

	rtnl_lock();
	ret = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return ret;
}
102 
103 void dsa_port_disable_rt(struct dsa_port *dp)
104 {
105 	struct dsa_switch *ds = dp->ds;
106 	int port = dp->index;
107 
108 	if (dp->pl)
109 		phylink_stop(dp->pl);
110 
111 	if (!dp->bridge_dev)
112 		dsa_port_set_state_now(dp, BR_STATE_DISABLED);
113 
114 	if (ds->ops->port_disable)
115 		ds->ops->port_disable(ds, port);
116 }
117 
/* rtnl-locked wrapper around dsa_port_disable_rt() */
void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}
124 
125 static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
126 					 struct netlink_ext_ack *extack)
127 {
128 	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
129 				   BR_BCAST_FLOOD;
130 	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
131 	int flag, err;
132 
133 	for_each_set_bit(flag, &mask, 32) {
134 		struct switchdev_brport_flags flags = {0};
135 
136 		flags.mask = BIT(flag);
137 
138 		if (br_port_flag_is_set(brport_dev, BIT(flag)))
139 			flags.val = BIT(flag);
140 
141 		err = dsa_port_bridge_flags(dp, flags, extack);
142 		if (err && err != -EOPNOTSUPP)
143 			return err;
144 	}
145 
146 	return 0;
147 }
148 
149 static void dsa_port_clear_brport_flags(struct dsa_port *dp)
150 {
151 	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
152 	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
153 				   BR_BCAST_FLOOD;
154 	int flag, err;
155 
156 	for_each_set_bit(flag, &mask, 32) {
157 		struct switchdev_brport_flags flags = {0};
158 
159 		flags.mask = BIT(flag);
160 		flags.val = val & BIT(flag);
161 
162 		err = dsa_port_bridge_flags(dp, flags, NULL);
163 		if (err && err != -EOPNOTSUPP)
164 			dev_err(dp->ds->dev,
165 				"failed to clear bridge port flag %lu: %pe\n",
166 				flags.val, ERR_PTR(err));
167 	}
168 }
169 
/* Replay the bridge's current attributes (brport flags, STP state, VLAN
 * filtering, multicast router, ageing time) into the hardware when a port
 * starts being offloaded. -EOPNOTSUPP from any individual step is
 * tolerated: that attribute simply stays unoffloaded.
 */
static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dp->bridge_dev;
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev));
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Note: the mrouter attribute is applied to the CPU port, not @dp */
	err = dsa_port_mrouter(dp->cpu_dp, br_multicast_router(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}
199 
/* Restore standalone-mode defaults on a port that stopped being offloaded;
 * the (partial) inverse of dsa_port_switchdev_sync_attrs().
 */
static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. But we even prefer it that way, because
	 * otherwise, some setups might never get the notification they need,
	 * for example, when a port leaves a LAG that offloads the bridge,
	 * it becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
	 * so allow it to be in BR_STATE_FORWARDING to be kept functional
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING);

	/* VLAN filtering is handled by dsa_switch_bridge_leave */

	/* Some drivers treat the notification for having a local multicast
	 * router by allowing multicast to be flooded to the CPU, so we should
	 * allow this in standalone mode too.
	 */
	dsa_port_mrouter(dp->cpu_dp, true, NULL);

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}
232 
233 static int dsa_tree_find_bridge_num(struct dsa_switch_tree *dst,
234 				    struct net_device *bridge_dev)
235 {
236 	struct dsa_port *dp;
237 
238 	/* When preparing the offload for a port, it will have a valid
239 	 * dp->bridge_dev pointer but a not yet valid dp->bridge_num.
240 	 * However there might be other ports having the same dp->bridge_dev
241 	 * and a valid dp->bridge_num, so just ignore this port.
242 	 */
243 	list_for_each_entry(dp, &dst->ports, list)
244 		if (dp->bridge_dev == bridge_dev && dp->bridge_num != -1)
245 			return dp->bridge_num;
246 
247 	return -1;
248 }
249 
250 static void dsa_port_bridge_tx_fwd_unoffload(struct dsa_port *dp,
251 					     struct net_device *bridge_dev)
252 {
253 	struct dsa_switch_tree *dst = dp->ds->dst;
254 	int bridge_num = dp->bridge_num;
255 	struct dsa_switch *ds = dp->ds;
256 
257 	/* No bridge TX forwarding offload => do nothing */
258 	if (!ds->ops->port_bridge_tx_fwd_unoffload || dp->bridge_num == -1)
259 		return;
260 
261 	dp->bridge_num = -1;
262 
263 	/* Check if the bridge is still in use, otherwise it is time
264 	 * to clean it up so we can reuse this bridge_num later.
265 	 */
266 	if (!dsa_tree_find_bridge_num(dst, bridge_dev))
267 		clear_bit(bridge_num, &dst->fwd_offloading_bridges);
268 
269 	/* Notify the chips only once the offload has been deactivated, so
270 	 * that they can update their configuration accordingly.
271 	 */
272 	ds->ops->port_bridge_tx_fwd_unoffload(ds, dp->index, bridge_dev,
273 					      bridge_num);
274 }
275 
/* Try to activate TX forwarding offload for @dp towards @bridge_dev.
 * Reuses the bridge number already assigned to this bridge in the tree,
 * or allocates a fresh one from dst->fwd_offloading_bridges. Returns true
 * if the offload is active, false if the driver lacks support, the switch
 * ran out of offloadable bridges, or the driver refused the offload.
 */
static bool dsa_port_bridge_tx_fwd_offload(struct dsa_port *dp,
					   struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct dsa_switch *ds = dp->ds;
	int bridge_num, err;

	if (!ds->ops->port_bridge_tx_fwd_offload)
		return false;

	bridge_num = dsa_tree_find_bridge_num(dst, bridge_dev);
	if (bridge_num < 0) {
		/* First port that offloads TX forwarding for this bridge */
		bridge_num = find_first_zero_bit(&dst->fwd_offloading_bridges,
						 DSA_MAX_NUM_OFFLOADING_BRIDGES);
		/* The tree-wide pool may be larger than what this switch
		 * supports, so also check against the per-switch limit.
		 */
		if (bridge_num >= ds->num_fwd_offloading_bridges)
			return false;

		set_bit(bridge_num, &dst->fwd_offloading_bridges);
	}

	dp->bridge_num = bridge_num;

	/* Notify the driver */
	err = ds->ops->port_bridge_tx_fwd_offload(ds, dp->index, bridge_dev,
						  bridge_num);
	if (err) {
		/* Also releases bridge_num if we were its only user */
		dsa_port_bridge_tx_fwd_unoffload(dp, bridge_dev);
		return false;
	}

	return true;
}
309 
/* Offload a user port that has just been enslaved to bridge @br. On any
 * failure, the previously completed steps are rolled back in reverse
 * order via the out_rollback* labels.
 */
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	struct net_device *dev = dp->slave;
	struct net_device *brport_dev;
	bool tx_fwd_offload;
	int err;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = br;

	brport_dev = dsa_port_to_bridge_port(dp);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	/* Best-effort: bridging still works without TX forwarding offload */
	tx_fwd_offload = dsa_port_bridge_tx_fwd_offload(dp, br);

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_slave_switchdev_notifier,
					    &dsa_slave_switchdev_blocking_notifier,
					    tx_fwd_offload, extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dp->bridge_dev = NULL;
	return err;
}
360 
361 void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
362 {
363 	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
364 
365 	switchdev_bridge_port_unoffload(brport_dev, dp,
366 					&dsa_slave_switchdev_notifier,
367 					&dsa_slave_switchdev_blocking_notifier);
368 }
369 
370 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
371 {
372 	struct dsa_notifier_bridge_info info = {
373 		.tree_index = dp->ds->dst->index,
374 		.sw_index = dp->ds->index,
375 		.port = dp->index,
376 		.br = br,
377 	};
378 	int err;
379 
380 	/* Here the port is already unbridged. Reflect the current configuration
381 	 * so that drivers can program their chips accordingly.
382 	 */
383 	dp->bridge_dev = NULL;
384 
385 	dsa_port_bridge_tx_fwd_unoffload(dp, br);
386 
387 	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
388 	if (err)
389 		pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");
390 
391 	dsa_port_switchdev_unsync_attrs(dp);
392 }
393 
394 int dsa_port_lag_change(struct dsa_port *dp,
395 			struct netdev_lag_lower_state_info *linfo)
396 {
397 	struct dsa_notifier_lag_info info = {
398 		.sw_index = dp->ds->index,
399 		.port = dp->index,
400 	};
401 	bool tx_enabled;
402 
403 	if (!dp->lag_dev)
404 		return 0;
405 
406 	/* On statically configured aggregates (e.g. loadbalance
407 	 * without LACP) ports will always be tx_enabled, even if the
408 	 * link is down. Thus we require both link_up and tx_enabled
409 	 * in order to include it in the tx set.
410 	 */
411 	tx_enabled = linfo->link_up && linfo->tx_enabled;
412 
413 	if (tx_enabled == dp->lag_tx_enabled)
414 		return 0;
415 
416 	dp->lag_tx_enabled = tx_enabled;
417 
418 	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
419 }
420 
/* Join @dp to LAG device @lag. If the LAG itself is enslaved to a bridge,
 * the port also joins that bridge. Fully unwinds on failure.
 */
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
		.info = uinfo,
	};
	struct net_device *bridge_dev;
	int err;

	dsa_lag_map(dp->ds->dst, lag);
	dp->lag_dev = lag;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	/* Nothing further to do unless the LAG is a bridge port */
	bridge_dev = netdev_master_upper_dev_get(lag);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dp->lag_dev = NULL;
	dsa_lag_unmap(dp->ds->dst, lag);
	return err;
}
458 
459 void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag)
460 {
461 	if (dp->bridge_dev)
462 		dsa_port_pre_bridge_leave(dp, dp->bridge_dev);
463 }
464 
465 void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
466 {
467 	struct dsa_notifier_lag_info info = {
468 		.sw_index = dp->ds->index,
469 		.port = dp->index,
470 		.lag = lag,
471 	};
472 	int err;
473 
474 	if (!dp->lag_dev)
475 		return;
476 
477 	/* Port might have been part of a LAG that in turn was
478 	 * attached to a bridge.
479 	 */
480 	if (dp->bridge_dev)
481 		dsa_port_bridge_leave(dp, dp->bridge_dev);
482 
483 	dp->lag_tx_enabled = false;
484 	dp->lag_dev = NULL;
485 
486 	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
487 	if (err)
488 		pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n",
489 		       err);
490 
491 	dsa_lag_unmap(dp->ds->dst, lag);
492 }
493 
/* Decide whether the requested VLAN awareness state can be applied to this
 * port. Fills @extack with the reason when it cannot.
 *
 * Must be called under rcu_read_lock() (required by
 * netdev_for_each_upper_dev_rcu()).
 */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err, i;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
		struct net_device *upper_dev, *slave = dp->slave;
		struct net_device *br = dp->bridge_dev;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device, respectively the VID is not found, returning
			 * 0 means success, which is a failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	/* Per-port VLAN filtering: no cross-port constraints to check */
	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	for (i = 0; i < ds->num_ports; i++) {
		struct net_device *other_bridge;

		other_bridge = dsa_to_port(ds, i)->bridge_dev;
		if (!other_bridge)
			continue;
		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (other_bridge == dp->bridge_dev)
			continue;
		if (br_vlan_enabled(other_bridge) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}
561 
/* Change the VLAN awareness of a port, after validating that the change is
 * consistent with 8021q uppers and, for switches where the setting is
 * global, with the other bridges on the chip. Caches the new state either
 * per-switch or per-port depending on ds->vlan_filtering_is_global.
 */
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	/* Already in the requested state */
	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = vlan_filtering;
	else
		dp->vlan_filtering = vlan_filtering;

	return 0;
}
597 
598 /* This enforces legacy behavior for switch drivers which assume they can't
599  * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
600  */
601 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
602 {
603 	struct dsa_switch *ds = dp->ds;
604 
605 	if (!dp->bridge_dev)
606 		return false;
607 
608 	return (!ds->configure_vlan_while_not_filtering &&
609 		!br_vlan_enabled(dp->bridge_dev));
610 }
611 
612 int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
613 {
614 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
615 	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
616 	struct dsa_notifier_ageing_time_info info;
617 	int err;
618 
619 	info.ageing_time = ageing_time;
620 
621 	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
622 	if (err)
623 		return err;
624 
625 	dp->ageing_time = ageing_time;
626 
627 	return 0;
628 }
629 
630 int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
631 			      struct switchdev_brport_flags flags,
632 			      struct netlink_ext_ack *extack)
633 {
634 	struct dsa_switch *ds = dp->ds;
635 
636 	if (!ds->ops->port_pre_bridge_flags)
637 		return -EINVAL;
638 
639 	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
640 }
641 
642 int dsa_port_bridge_flags(const struct dsa_port *dp,
643 			  struct switchdev_brport_flags flags,
644 			  struct netlink_ext_ack *extack)
645 {
646 	struct dsa_switch *ds = dp->ds;
647 
648 	if (!ds->ops->port_bridge_flags)
649 		return -EOPNOTSUPP;
650 
651 	return ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
652 }
653 
654 int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
655 		     struct netlink_ext_ack *extack)
656 {
657 	struct dsa_switch *ds = dp->ds;
658 
659 	if (!ds->ops->port_set_mrouter)
660 		return -EOPNOTSUPP;
661 
662 	return ds->ops->port_set_mrouter(ds, dp->index, mrouter, extack);
663 }
664 
665 int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
666 			bool targeted_match)
667 {
668 	struct dsa_notifier_mtu_info info = {
669 		.sw_index = dp->ds->index,
670 		.targeted_match = targeted_match,
671 		.port = dp->index,
672 		.mtu = new_mtu,
673 	};
674 
675 	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
676 }
677 
678 int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
679 		     u16 vid)
680 {
681 	struct dsa_notifier_fdb_info info = {
682 		.sw_index = dp->ds->index,
683 		.port = dp->index,
684 		.addr = addr,
685 		.vid = vid,
686 	};
687 
688 	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
689 }
690 
691 int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
692 		     u16 vid)
693 {
694 	struct dsa_notifier_fdb_info info = {
695 		.sw_index = dp->ds->index,
696 		.port = dp->index,
697 		.addr = addr,
698 		.vid = vid,
699 
700 	};
701 
702 	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
703 }
704 
705 int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
706 			  u16 vid)
707 {
708 	struct dsa_notifier_fdb_info info = {
709 		.sw_index = dp->ds->index,
710 		.port = dp->index,
711 		.addr = addr,
712 		.vid = vid,
713 	};
714 	struct dsa_port *cpu_dp = dp->cpu_dp;
715 	int err;
716 
717 	err = dev_uc_add(cpu_dp->master, addr);
718 	if (err)
719 		return err;
720 
721 	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
722 }
723 
724 int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
725 			  u16 vid)
726 {
727 	struct dsa_notifier_fdb_info info = {
728 		.sw_index = dp->ds->index,
729 		.port = dp->index,
730 		.addr = addr,
731 		.vid = vid,
732 	};
733 	struct dsa_port *cpu_dp = dp->cpu_dp;
734 	int err;
735 
736 	err = dev_uc_del(cpu_dp->master, addr);
737 	if (err)
738 		return err;
739 
740 	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
741 }
742 
743 int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
744 {
745 	struct dsa_switch *ds = dp->ds;
746 	int port = dp->index;
747 
748 	if (!ds->ops->port_fdb_dump)
749 		return -EOPNOTSUPP;
750 
751 	return ds->ops->port_fdb_dump(ds, port, cb, data);
752 }
753 
754 int dsa_port_mdb_add(const struct dsa_port *dp,
755 		     const struct switchdev_obj_port_mdb *mdb)
756 {
757 	struct dsa_notifier_mdb_info info = {
758 		.sw_index = dp->ds->index,
759 		.port = dp->index,
760 		.mdb = mdb,
761 	};
762 
763 	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
764 }
765 
766 int dsa_port_mdb_del(const struct dsa_port *dp,
767 		     const struct switchdev_obj_port_mdb *mdb)
768 {
769 	struct dsa_notifier_mdb_info info = {
770 		.sw_index = dp->ds->index,
771 		.port = dp->index,
772 		.mdb = mdb,
773 	};
774 
775 	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
776 }
777 
778 int dsa_port_host_mdb_add(const struct dsa_port *dp,
779 			  const struct switchdev_obj_port_mdb *mdb)
780 {
781 	struct dsa_notifier_mdb_info info = {
782 		.sw_index = dp->ds->index,
783 		.port = dp->index,
784 		.mdb = mdb,
785 	};
786 	struct dsa_port *cpu_dp = dp->cpu_dp;
787 	int err;
788 
789 	err = dev_mc_add(cpu_dp->master, mdb->addr);
790 	if (err)
791 		return err;
792 
793 	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
794 }
795 
796 int dsa_port_host_mdb_del(const struct dsa_port *dp,
797 			  const struct switchdev_obj_port_mdb *mdb)
798 {
799 	struct dsa_notifier_mdb_info info = {
800 		.sw_index = dp->ds->index,
801 		.port = dp->index,
802 		.mdb = mdb,
803 	};
804 	struct dsa_port *cpu_dp = dp->cpu_dp;
805 	int err;
806 
807 	err = dev_mc_del(cpu_dp->master, mdb->addr);
808 	if (err)
809 		return err;
810 
811 	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
812 }
813 
814 int dsa_port_vlan_add(struct dsa_port *dp,
815 		      const struct switchdev_obj_port_vlan *vlan,
816 		      struct netlink_ext_ack *extack)
817 {
818 	struct dsa_notifier_vlan_info info = {
819 		.sw_index = dp->ds->index,
820 		.port = dp->index,
821 		.vlan = vlan,
822 		.extack = extack,
823 	};
824 
825 	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
826 }
827 
828 int dsa_port_vlan_del(struct dsa_port *dp,
829 		      const struct switchdev_obj_port_vlan *vlan)
830 {
831 	struct dsa_notifier_vlan_info info = {
832 		.sw_index = dp->ds->index,
833 		.port = dp->index,
834 		.vlan = vlan,
835 	};
836 
837 	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
838 }
839 
840 int dsa_port_mrp_add(const struct dsa_port *dp,
841 		     const struct switchdev_obj_mrp *mrp)
842 {
843 	struct dsa_notifier_mrp_info info = {
844 		.sw_index = dp->ds->index,
845 		.port = dp->index,
846 		.mrp = mrp,
847 	};
848 
849 	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD, &info);
850 }
851 
852 int dsa_port_mrp_del(const struct dsa_port *dp,
853 		     const struct switchdev_obj_mrp *mrp)
854 {
855 	struct dsa_notifier_mrp_info info = {
856 		.sw_index = dp->ds->index,
857 		.port = dp->index,
858 		.mrp = mrp,
859 	};
860 
861 	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL, &info);
862 }
863 
864 int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
865 			       const struct switchdev_obj_ring_role_mrp *mrp)
866 {
867 	struct dsa_notifier_mrp_ring_role_info info = {
868 		.sw_index = dp->ds->index,
869 		.port = dp->index,
870 		.mrp = mrp,
871 	};
872 
873 	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD_RING_ROLE, &info);
874 }
875 
876 int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
877 			       const struct switchdev_obj_ring_role_mrp *mrp)
878 {
879 	struct dsa_notifier_mrp_ring_role_info info = {
880 		.sw_index = dp->ds->index,
881 		.port = dp->index,
882 		.mrp = mrp,
883 	};
884 
885 	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL_RING_ROLE, &info);
886 }
887 
888 void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
889 			       const struct dsa_device_ops *tag_ops)
890 {
891 	cpu_dp->rcv = tag_ops->rcv;
892 	cpu_dp->tag_ops = tag_ops;
893 }
894 
895 static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
896 {
897 	struct device_node *phy_dn;
898 	struct phy_device *phydev;
899 
900 	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
901 	if (!phy_dn)
902 		return NULL;
903 
904 	phydev = of_phy_find_device(phy_dn);
905 	if (!phydev) {
906 		of_node_put(phy_dn);
907 		return ERR_PTR(-EPROBE_DEFER);
908 	}
909 
910 	of_node_put(phy_dn);
911 	return phydev;
912 }
913 
914 static void dsa_port_phylink_validate(struct phylink_config *config,
915 				      unsigned long *supported,
916 				      struct phylink_link_state *state)
917 {
918 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
919 	struct dsa_switch *ds = dp->ds;
920 
921 	if (!ds->ops->phylink_validate)
922 		return;
923 
924 	ds->ops->phylink_validate(ds, dp->index, supported, state);
925 }
926 
927 static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
928 					       struct phylink_link_state *state)
929 {
930 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
931 	struct dsa_switch *ds = dp->ds;
932 	int err;
933 
934 	/* Only called for inband modes */
935 	if (!ds->ops->phylink_mac_link_state) {
936 		state->link = 0;
937 		return;
938 	}
939 
940 	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
941 	if (err < 0) {
942 		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
943 			dp->index, err);
944 		state->link = 0;
945 	}
946 }
947 
948 static void dsa_port_phylink_mac_config(struct phylink_config *config,
949 					unsigned int mode,
950 					const struct phylink_link_state *state)
951 {
952 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
953 	struct dsa_switch *ds = dp->ds;
954 
955 	if (!ds->ops->phylink_mac_config)
956 		return;
957 
958 	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
959 }
960 
961 static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
962 {
963 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
964 	struct dsa_switch *ds = dp->ds;
965 
966 	if (!ds->ops->phylink_mac_an_restart)
967 		return;
968 
969 	ds->ops->phylink_mac_an_restart(ds, dp->index);
970 }
971 
972 static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
973 					   unsigned int mode,
974 					   phy_interface_t interface)
975 {
976 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
977 	struct phy_device *phydev = NULL;
978 	struct dsa_switch *ds = dp->ds;
979 
980 	if (dsa_is_user_port(ds, dp->index))
981 		phydev = dp->slave->phydev;
982 
983 	if (!ds->ops->phylink_mac_link_down) {
984 		if (ds->ops->adjust_link && phydev)
985 			ds->ops->adjust_link(ds, dp->index, phydev);
986 		return;
987 	}
988 
989 	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
990 }
991 
992 static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
993 					 struct phy_device *phydev,
994 					 unsigned int mode,
995 					 phy_interface_t interface,
996 					 int speed, int duplex,
997 					 bool tx_pause, bool rx_pause)
998 {
999 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1000 	struct dsa_switch *ds = dp->ds;
1001 
1002 	if (!ds->ops->phylink_mac_link_up) {
1003 		if (ds->ops->adjust_link && phydev)
1004 			ds->ops->adjust_link(ds, dp->index, phydev);
1005 		return;
1006 	}
1007 
1008 	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
1009 				     speed, duplex, tx_pause, rx_pause);
1010 }
1011 
/* Glue between phylink and the dsa_switch_ops phylink_* driver callbacks */
const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};
1020 
/* Legacy PHYLIB path: resume (@enable true) or suspend (@enable false) the
 * PHY referenced by the port's "phy-handle", then kick adjust_link().
 * Returns 0 when the port has no phy-handle at all.
 */
static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	/* NOTE(review): this message is also printed on the disable path */
	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

	/* Drop the reference taken by dsa_port_get_phy_device(), on both
	 * the success and the error paths.
	 */
err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}
1058 
1059 static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
1060 {
1061 	struct device_node *dn = dp->dn;
1062 	struct dsa_switch *ds = dp->ds;
1063 	struct phy_device *phydev;
1064 	int port = dp->index;
1065 	phy_interface_t mode;
1066 	int err;
1067 
1068 	err = of_phy_register_fixed_link(dn);
1069 	if (err) {
1070 		dev_err(ds->dev,
1071 			"failed to register the fixed PHY of port %d\n",
1072 			port);
1073 		return err;
1074 	}
1075 
1076 	phydev = of_phy_find_device(dn);
1077 
1078 	err = of_get_phy_mode(dn, &mode);
1079 	if (err)
1080 		mode = PHY_INTERFACE_MODE_NA;
1081 	phydev->interface = mode;
1082 
1083 	genphy_read_status(phydev);
1084 
1085 	if (ds->ops->adjust_link)
1086 		ds->ops->adjust_link(ds, port, phydev);
1087 
1088 	put_device(&phydev->mdio.dev);
1089 
1090 	return 0;
1091 }
1092 
/* Create a phylink instance for the port from its DT node and connect it
 * to the described PHY, if any (-ENODEV from the connect means "no PHY",
 * which is fine for fixed links and inband modes).
 */
static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	phy_interface_t mode;
	int err;

	/* A missing/invalid phy-mode property means "autodetect" */
	err = of_get_phy_mode(port_dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;
	dp->pl_config.pcs_poll = ds->pcs_poll;

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}
1127 
1128 int dsa_port_link_register_of(struct dsa_port *dp)
1129 {
1130 	struct dsa_switch *ds = dp->ds;
1131 	struct device_node *phy_np;
1132 	int port = dp->index;
1133 
1134 	if (!ds->ops->adjust_link) {
1135 		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
1136 		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
1137 			if (ds->ops->phylink_mac_link_down)
1138 				ds->ops->phylink_mac_link_down(ds, port,
1139 					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
1140 			return dsa_port_phylink_register(dp);
1141 		}
1142 		return 0;
1143 	}
1144 
1145 	dev_warn(ds->dev,
1146 		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
1147 
1148 	if (of_phy_is_fixed_link(dp->dn))
1149 		return dsa_port_fixed_link_register_of(dp);
1150 	else
1151 		return dsa_port_setup_phy_of(dp, true);
1152 }
1153 
1154 void dsa_port_link_unregister_of(struct dsa_port *dp)
1155 {
1156 	struct dsa_switch *ds = dp->ds;
1157 
1158 	if (!ds->ops->adjust_link && dp->pl) {
1159 		rtnl_lock();
1160 		phylink_disconnect_phy(dp->pl);
1161 		rtnl_unlock();
1162 		phylink_destroy(dp->pl);
1163 		dp->pl = NULL;
1164 		return;
1165 	}
1166 
1167 	if (of_phy_is_fixed_link(dp->dn))
1168 		of_phy_deregister_fixed_link(dp->dn);
1169 	else
1170 		dsa_port_setup_phy_of(dp, false);
1171 }
1172 
1173 int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
1174 {
1175 	struct phy_device *phydev;
1176 	int ret = -EOPNOTSUPP;
1177 
1178 	if (of_phy_is_fixed_link(dp->dn))
1179 		return ret;
1180 
1181 	phydev = dsa_port_get_phy_device(dp);
1182 	if (IS_ERR_OR_NULL(phydev))
1183 		return ret;
1184 
1185 	ret = phy_ethtool_get_strings(phydev, data);
1186 	put_device(&phydev->mdio.dev);
1187 
1188 	return ret;
1189 }
1190 EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);
1191 
1192 int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
1193 {
1194 	struct phy_device *phydev;
1195 	int ret = -EOPNOTSUPP;
1196 
1197 	if (of_phy_is_fixed_link(dp->dn))
1198 		return ret;
1199 
1200 	phydev = dsa_port_get_phy_device(dp);
1201 	if (IS_ERR_OR_NULL(phydev))
1202 		return ret;
1203 
1204 	ret = phy_ethtool_get_stats(phydev, NULL, data);
1205 	put_device(&phydev->mdio.dev);
1206 
1207 	return ret;
1208 }
1209 EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);
1210 
1211 int dsa_port_get_phy_sset_count(struct dsa_port *dp)
1212 {
1213 	struct phy_device *phydev;
1214 	int ret = -EOPNOTSUPP;
1215 
1216 	if (of_phy_is_fixed_link(dp->dn))
1217 		return ret;
1218 
1219 	phydev = dsa_port_get_phy_device(dp);
1220 	if (IS_ERR_OR_NULL(phydev))
1221 		return ret;
1222 
1223 	ret = phy_ethtool_get_sset_count(phydev);
1224 	put_device(&phydev->mdio.dev);
1225 
1226 	return ret;
1227 }
1228 EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);
1229 
1230 int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
1231 {
1232 	struct dsa_notifier_hsr_info info = {
1233 		.sw_index = dp->ds->index,
1234 		.port = dp->index,
1235 		.hsr = hsr,
1236 	};
1237 	int err;
1238 
1239 	dp->hsr_dev = hsr;
1240 
1241 	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
1242 	if (err)
1243 		dp->hsr_dev = NULL;
1244 
1245 	return err;
1246 }
1247 
1248 void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
1249 {
1250 	struct dsa_notifier_hsr_info info = {
1251 		.sw_index = dp->ds->index,
1252 		.port = dp->index,
1253 		.hsr = hsr,
1254 	};
1255 	int err;
1256 
1257 	dp->hsr_dev = NULL;
1258 
1259 	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
1260 	if (err)
1261 		pr_err("DSA: failed to notify DSA_NOTIFIER_HSR_LEAVE\n");
1262 }
1263 
1264 int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid)
1265 {
1266 	struct dsa_notifier_tag_8021q_vlan_info info = {
1267 		.tree_index = dp->ds->dst->index,
1268 		.sw_index = dp->ds->index,
1269 		.port = dp->index,
1270 		.vid = vid,
1271 	};
1272 
1273 	return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
1274 }
1275 
1276 void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid)
1277 {
1278 	struct dsa_notifier_tag_8021q_vlan_info info = {
1279 		.tree_index = dp->ds->dst->index,
1280 		.sw_index = dp->ds->index,
1281 		.port = dp->index,
1282 		.vid = vid,
1283 	};
1284 	int err;
1285 
1286 	err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
1287 	if (err)
1288 		pr_err("DSA: failed to notify tag_8021q VLAN deletion: %pe\n",
1289 		       ERR_PTR(err));
1290 }
1291