xref: /openbmc/linux/net/dsa/port.c (revision 4e51bf44a03af6fa19a39a36ea8fedfacb8ccadf)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Handling of a single switch port
4  *
5  * Copyright (c) 2017 Savoir-faire Linux Inc.
6  *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
7  */
8 
9 #include <linux/if_bridge.h>
10 #include <linux/notifier.h>
11 #include <linux/of_mdio.h>
12 #include <linux/of_net.h>
13 
14 #include "dsa_priv.h"
15 
/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 *
 * Return: 0 on success, or a negative error code propagated from the
 * notifier chain.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}
32 
33 int dsa_port_set_state(struct dsa_port *dp, u8 state)
34 {
35 	struct dsa_switch *ds = dp->ds;
36 	int port = dp->index;
37 
38 	if (!ds->ops->port_stp_state_set)
39 		return -EOPNOTSUPP;
40 
41 	ds->ops->port_stp_state_set(ds, port, state);
42 
43 	if (ds->ops->port_fast_age) {
44 		/* Fast age FDB entries or flush appropriate forwarding database
45 		 * for the given port, if we are moving it from Learning or
46 		 * Forwarding state, to Disabled or Blocking or Listening state.
47 		 */
48 
49 		if ((dp->stp_state == BR_STATE_LEARNING ||
50 		     dp->stp_state == BR_STATE_FORWARDING) &&
51 		    (state == BR_STATE_DISABLED ||
52 		     state == BR_STATE_BLOCKING ||
53 		     state == BR_STATE_LISTENING))
54 			ds->ops->port_fast_age(ds, port);
55 	}
56 
57 	dp->stp_state = state;
58 
59 	return 0;
60 }
61 
62 static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
63 {
64 	int err;
65 
66 	err = dsa_port_set_state(dp, state);
67 	if (err)
68 		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
69 }
70 
/* Enable a port with rtnl_lock() already held by the caller: power up the
 * hardware port, put standalone ports straight into forwarding (no bridge
 * will do it for them), and start phylink if one is attached.
 */
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	/* Bridged ports get their STP state from the bridge layer instead */
	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}
91 
/* Locked wrapper around dsa_port_enable_rt() for callers that do not
 * already hold rtnl_lock().
 */
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int ret;

	rtnl_lock();
	ret = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return ret;
}
102 
/* Disable a port with rtnl_lock() already held: the exact mirror of
 * dsa_port_enable_rt(), performed in reverse order (stop phylink, move
 * standalone ports to disabled, then power down the hardware port).
 */
void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	/* Bridged ports have their STP state managed by the bridge layer */
	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}
117 
/* Locked wrapper around dsa_port_disable_rt() for callers that do not
 * already hold rtnl_lock().
 */
void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}
124 
/* Replay the bridge port flags (learning/flooding) already configured on the
 * brport into the hardware. Flags are offered one at a time so a driver that
 * supports only a subset can still offload the rest; -EOPNOTSUPP from the
 * driver is therefore not fatal.
 */
static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}
148 
/* Reset the bridge port flags to their standalone defaults: all flooding
 * enabled (val includes the three flood bits), address learning disabled
 * (BR_LEARNING is in the mask but not in val). Applied one flag at a time,
 * like dsa_port_inherit_brport_flags(); failures are only logged since this
 * runs on teardown paths that cannot bail out.
 */
static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}
169 
/* Synchronize the port's hardware state with the bridge's current
 * attributes at join time: brport flags, STP state, VLAN filtering,
 * multicast router presence and ageing time. Drivers lacking a given
 * offload (-EOPNOTSUPP) do not abort the join.
 */
static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dp->bridge_dev;
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev));
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* mrouter state is applied on the CPU port, where the traffic for a
	 * local multicast router would be delivered
	 */
	err = dsa_port_mrouter(dp->cpu_dp, br_multicast_router(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}
199 
/* Undo dsa_port_switchdev_sync_attrs() when the port goes back to
 * standalone mode. Best-effort: all failures are ignored or logged by the
 * callees, since there is no way to abort a bridge leave.
 */
static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. But we even prefer it that way, because
	 * otherwise, some setups might never get the notification they need,
	 * for example, when a port leaves a LAG that offloads the bridge,
	 * it becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
	 * so allow it to be in BR_STATE_FORWARDING to be kept functional
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING);

	/* VLAN filtering is handled by dsa_switch_bridge_leave */

	/* Some drivers treat the notification for having a local multicast
	 * router by allowing multicast to be flooded to the CPU, so we should
	 * allow this in standalone mode too.
	 */
	dsa_port_mrouter(dp->cpu_dp, true, NULL);

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}
232 
/* Offload a bridge join for this port: record the bridge, notify all
 * switches in the tree, register the port as an offloaded brport with
 * switchdev, then replay the bridge's attributes into hardware. Each step
 * is unwound in reverse order on failure.
 */
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	struct net_device *dev = dp->slave;
	struct net_device *brport_dev;
	int err;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = br;

	/* Must be read after dp->bridge_dev is set */
	brport_dev = dsa_port_to_bridge_port(dp);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_slave_switchdev_notifier,
					    &dsa_slave_switchdev_blocking_notifier,
					    extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dp->bridge_dev = NULL;
	return err;
}
280 
/* First half of a bridge leave: tear down the switchdev brport offload
 * while the port is still attached to @br, so notifier replay still sees a
 * valid bridge port.
 */
void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
}
289 
/* Second half of a bridge leave: clear the cached bridge, notify the whole
 * tree, and restore the port's standalone attributes. Best-effort on the
 * notifier (the port is already unbridged, there is nothing to abort).
 */
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = NULL;

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");

	dsa_port_switchdev_unsync_attrs(dp);
}
311 
/* React to a change in the lower state of a LAG member port (link and
 * tx_enabled), notifying the fabric only when the effective TX eligibility
 * of the port actually changes.
 */
int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	bool tx_enabled;

	/* Not a LAG member, nothing to do */
	if (!dp->lag_dev)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}
338 
/* Join a LAG: map the LAG in the tree, notify the fabric, and if the LAG is
 * itself enslaved to a bridge, join that bridge too (a port joining a
 * bridged LAG becomes a bridge port immediately). Unwound in reverse order
 * on failure.
 */
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
		.info = uinfo,
	};
	struct net_device *bridge_dev;
	int err;

	dsa_lag_map(dp->ds->dst, lag);
	dp->lag_dev = lag;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	bridge_dev = netdev_master_upper_dev_get(lag);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dp->lag_dev = NULL;
	dsa_lag_unmap(dp->ds->dst, lag);
	return err;
}
376 
377 void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag)
378 {
379 	if (dp->bridge_dev)
380 		dsa_port_pre_bridge_leave(dp, dp->bridge_dev);
381 }
382 
/* Leave a LAG: leave any bridge the LAG was attached to, reset the port's
 * LAG state, notify the fabric (best-effort) and release the LAG mapping.
 */
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
	};
	int err;

	if (!dp->lag_dev)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (dp->bridge_dev)
		dsa_port_bridge_leave(dp, dp->bridge_dev);

	dp->lag_tx_enabled = false;
	dp->lag_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n",
		       err);

	dsa_lag_unmap(dp->ds->dst, lag);
}
411 
/* Must be called under rcu_read_lock().
 *
 * Decide whether the requested VLAN filtering state can be applied to @dp.
 * Returns false (with an extack message) when 8021q uppers clash with
 * bridge VLANs, or when the chip's global vlan_filtering setting conflicts
 * with another bridge spanning the same switch.
 */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err, i;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
		struct net_device *upper_dev, *slave = dp->slave;
		struct net_device *br = dp->bridge_dev;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device, respectively the VID is not found, returning
			 * 0 means success, which is a failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	/* Per-port vlan_filtering can always be changed independently */
	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	for (i = 0; i < ds->num_ports; i++) {
		struct net_device *other_bridge;

		other_bridge = dsa_to_port(ds, i)->bridge_dev;
		if (!other_bridge)
			continue;
		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (other_bridge == dp->bridge_dev)
			continue;
		if (br_vlan_enabled(other_bridge) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}
479 
/* Apply a VLAN filtering change for @dp: validate it against 8021q uppers
 * and other bridges on the chip, program the driver, then cache the new
 * state either per-switch or per-port depending on the hardware.
 */
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	/* Nothing to do if the requested state is already in effect */
	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	/* Cache at the granularity the hardware actually operates at */
	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = vlan_filtering;
	else
		dp->vlan_filtering = vlan_filtering;

	return 0;
}
515 
516 /* This enforces legacy behavior for switch drivers which assume they can't
517  * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
518  */
519 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
520 {
521 	struct dsa_switch *ds = dp->ds;
522 
523 	if (!dp->bridge_dev)
524 		return false;
525 
526 	return (!ds->configure_vlan_while_not_filtering &&
527 		!br_vlan_enabled(dp->bridge_dev));
528 }
529 
530 int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
531 {
532 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
533 	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
534 	struct dsa_notifier_ageing_time_info info;
535 	int err;
536 
537 	info.ageing_time = ageing_time;
538 
539 	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
540 	if (err)
541 		return err;
542 
543 	dp->ageing_time = ageing_time;
544 
545 	return 0;
546 }
547 
/* Ask the driver whether it can offload the given brport flags before the
 * bridge commits them.
 * NOTE(review): this deliberately returns -EINVAL (not -EOPNOTSUPP) when
 * the hook is absent — presumably so the switchdev caller rejects flag
 * changes the hardware cannot veto; confirm against the switchdev
 * PRE_BRIDGE_FLAGS handling before changing it.
 */
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}
559 
560 int dsa_port_bridge_flags(const struct dsa_port *dp,
561 			  struct switchdev_brport_flags flags,
562 			  struct netlink_ext_ack *extack)
563 {
564 	struct dsa_switch *ds = dp->ds;
565 
566 	if (!ds->ops->port_bridge_flags)
567 		return -EOPNOTSUPP;
568 
569 	return ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
570 }
571 
572 int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
573 		     struct netlink_ext_ack *extack)
574 {
575 	struct dsa_switch *ds = dp->ds;
576 
577 	if (!ds->ops->port_set_mrouter)
578 		return -EOPNOTSUPP;
579 
580 	return ds->ops->port_set_mrouter(ds, dp->index, mrouter, extack);
581 }
582 
583 int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
584 			bool targeted_match)
585 {
586 	struct dsa_notifier_mtu_info info = {
587 		.sw_index = dp->ds->index,
588 		.targeted_match = targeted_match,
589 		.port = dp->index,
590 		.mtu = new_mtu,
591 	};
592 
593 	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
594 }
595 
596 int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
597 		     u16 vid)
598 {
599 	struct dsa_notifier_fdb_info info = {
600 		.sw_index = dp->ds->index,
601 		.port = dp->index,
602 		.addr = addr,
603 		.vid = vid,
604 	};
605 
606 	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
607 }
608 
609 int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
610 		     u16 vid)
611 {
612 	struct dsa_notifier_fdb_info info = {
613 		.sw_index = dp->ds->index,
614 		.port = dp->index,
615 		.addr = addr,
616 		.vid = vid,
617 
618 	};
619 
620 	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
621 }
622 
623 int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
624 			  u16 vid)
625 {
626 	struct dsa_notifier_fdb_info info = {
627 		.sw_index = dp->ds->index,
628 		.port = dp->index,
629 		.addr = addr,
630 		.vid = vid,
631 	};
632 	struct dsa_port *cpu_dp = dp->cpu_dp;
633 	int err;
634 
635 	err = dev_uc_add(cpu_dp->master, addr);
636 	if (err)
637 		return err;
638 
639 	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
640 }
641 
/* Remove a host (CPU-trapped) FDB entry: delete the address from the master
 * interface's RX filter, then notify the fabric.
 * NOTE(review): if the notifier fails, the address has already been removed
 * from the master with no rollback — verify whether that is acceptable.
 */
int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_uc_del(cpu_dp->master, addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}
660 
661 int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
662 {
663 	struct dsa_switch *ds = dp->ds;
664 	int port = dp->index;
665 
666 	if (!ds->ops->port_fdb_dump)
667 		return -EOPNOTSUPP;
668 
669 	return ds->ops->port_fdb_dump(ds, port, cb, data);
670 }
671 
672 int dsa_port_mdb_add(const struct dsa_port *dp,
673 		     const struct switchdev_obj_port_mdb *mdb)
674 {
675 	struct dsa_notifier_mdb_info info = {
676 		.sw_index = dp->ds->index,
677 		.port = dp->index,
678 		.mdb = mdb,
679 	};
680 
681 	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
682 }
683 
684 int dsa_port_mdb_del(const struct dsa_port *dp,
685 		     const struct switchdev_obj_port_mdb *mdb)
686 {
687 	struct dsa_notifier_mdb_info info = {
688 		.sw_index = dp->ds->index,
689 		.port = dp->index,
690 		.mdb = mdb,
691 	};
692 
693 	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
694 }
695 
696 int dsa_port_host_mdb_add(const struct dsa_port *dp,
697 			  const struct switchdev_obj_port_mdb *mdb)
698 {
699 	struct dsa_notifier_mdb_info info = {
700 		.sw_index = dp->ds->index,
701 		.port = dp->index,
702 		.mdb = mdb,
703 	};
704 	struct dsa_port *cpu_dp = dp->cpu_dp;
705 	int err;
706 
707 	err = dev_mc_add(cpu_dp->master, mdb->addr);
708 	if (err)
709 		return err;
710 
711 	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
712 }
713 
/* Remove a host multicast database entry: delete the group address from
 * the master interface's RX filter, then notify the fabric.
 * NOTE(review): if the notifier fails, the address has already been removed
 * from the master with no rollback — verify whether that is acceptable.
 */
int dsa_port_host_mdb_del(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_mc_del(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}
731 
732 int dsa_port_vlan_add(struct dsa_port *dp,
733 		      const struct switchdev_obj_port_vlan *vlan,
734 		      struct netlink_ext_ack *extack)
735 {
736 	struct dsa_notifier_vlan_info info = {
737 		.sw_index = dp->ds->index,
738 		.port = dp->index,
739 		.vlan = vlan,
740 		.extack = extack,
741 	};
742 
743 	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
744 }
745 
746 int dsa_port_vlan_del(struct dsa_port *dp,
747 		      const struct switchdev_obj_port_vlan *vlan)
748 {
749 	struct dsa_notifier_vlan_info info = {
750 		.sw_index = dp->ds->index,
751 		.port = dp->index,
752 		.vlan = vlan,
753 	};
754 
755 	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
756 }
757 
758 int dsa_port_mrp_add(const struct dsa_port *dp,
759 		     const struct switchdev_obj_mrp *mrp)
760 {
761 	struct dsa_notifier_mrp_info info = {
762 		.sw_index = dp->ds->index,
763 		.port = dp->index,
764 		.mrp = mrp,
765 	};
766 
767 	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD, &info);
768 }
769 
770 int dsa_port_mrp_del(const struct dsa_port *dp,
771 		     const struct switchdev_obj_mrp *mrp)
772 {
773 	struct dsa_notifier_mrp_info info = {
774 		.sw_index = dp->ds->index,
775 		.port = dp->index,
776 		.mrp = mrp,
777 	};
778 
779 	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL, &info);
780 }
781 
782 int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
783 			       const struct switchdev_obj_ring_role_mrp *mrp)
784 {
785 	struct dsa_notifier_mrp_ring_role_info info = {
786 		.sw_index = dp->ds->index,
787 		.port = dp->index,
788 		.mrp = mrp,
789 	};
790 
791 	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD_RING_ROLE, &info);
792 }
793 
794 int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
795 			       const struct switchdev_obj_ring_role_mrp *mrp)
796 {
797 	struct dsa_notifier_mrp_ring_role_info info = {
798 		.sw_index = dp->ds->index,
799 		.port = dp->index,
800 		.mrp = mrp,
801 	};
802 
803 	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL_RING_ROLE, &info);
804 }
805 
/* Cache the tagging protocol operations on the CPU port, along with direct
 * pointers to its filter/rcv handlers for the hot RX path.
 */
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->filter = tag_ops->filter;
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}
813 
814 static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
815 {
816 	struct device_node *phy_dn;
817 	struct phy_device *phydev;
818 
819 	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
820 	if (!phy_dn)
821 		return NULL;
822 
823 	phydev = of_phy_find_device(phy_dn);
824 	if (!phydev) {
825 		of_node_put(phy_dn);
826 		return ERR_PTR(-EPROBE_DEFER);
827 	}
828 
829 	of_node_put(phy_dn);
830 	return phydev;
831 }
832 
/* phylink .validate callback: delegate link-mode validation to the driver,
 * leaving @supported untouched when the driver has no opinion.
 */
static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate)
		return;

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}
845 
/* phylink .mac_pcs_get_state callback: read the in-band link state from the
 * driver, reporting the link as down when the driver lacks the hook or the
 * read fails.
 */
static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}
866 
/* phylink .mac_config callback: forward the MAC configuration request to
 * the driver, if implemented.
 */
static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}
879 
/* phylink .mac_an_restart callback: ask the driver to restart in-band
 * autonegotiation, if implemented.
 */
static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}
890 
/* phylink .mac_link_down callback. Drivers that still use the legacy
 * PHYLIB adjust_link API get that called instead, but only for user ports
 * (the only ones with a slave net_device carrying a phydev).
 */
static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_is_user_port(ds, dp->index))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		/* Legacy PHYLIB fallback */
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}
910 
/* phylink .mac_link_up callback. Drivers that still use the legacy PHYLIB
 * adjust_link API get that called instead, when a phydev is available.
 */
static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		/* Legacy PHYLIB fallback */
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}
930 
/* phylink MAC operations shared by all DSA ports; each callback simply
 * forwards to the corresponding dsa_switch_ops hook.
 */
const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};
939 
940 static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
941 {
942 	struct dsa_switch *ds = dp->ds;
943 	struct phy_device *phydev;
944 	int port = dp->index;
945 	int err = 0;
946 
947 	phydev = dsa_port_get_phy_device(dp);
948 	if (!phydev)
949 		return 0;
950 
951 	if (IS_ERR(phydev))
952 		return PTR_ERR(phydev);
953 
954 	if (enable) {
955 		err = genphy_resume(phydev);
956 		if (err < 0)
957 			goto err_put_dev;
958 
959 		err = genphy_read_status(phydev);
960 		if (err < 0)
961 			goto err_put_dev;
962 	} else {
963 		err = genphy_suspend(phydev);
964 		if (err < 0)
965 			goto err_put_dev;
966 	}
967 
968 	if (ds->ops->adjust_link)
969 		ds->ops->adjust_link(ds, port, phydev);
970 
971 	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));
972 
973 err_put_dev:
974 	put_device(&phydev->mdio.dev);
975 	return err;
976 }
977 
978 static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
979 {
980 	struct device_node *dn = dp->dn;
981 	struct dsa_switch *ds = dp->ds;
982 	struct phy_device *phydev;
983 	int port = dp->index;
984 	phy_interface_t mode;
985 	int err;
986 
987 	err = of_phy_register_fixed_link(dn);
988 	if (err) {
989 		dev_err(ds->dev,
990 			"failed to register the fixed PHY of port %d\n",
991 			port);
992 		return err;
993 	}
994 
995 	phydev = of_phy_find_device(dn);
996 
997 	err = of_get_phy_mode(dn, &mode);
998 	if (err)
999 		mode = PHY_INTERFACE_MODE_NA;
1000 	phydev->interface = mode;
1001 
1002 	genphy_read_status(phydev);
1003 
1004 	if (ds->ops->adjust_link)
1005 		ds->ops->adjust_link(ds, port, phydev);
1006 
1007 	put_device(&phydev->mdio.dev);
1008 
1009 	return 0;
1010 }
1011 
/* Create a phylink instance for a shared (CPU/DSA) port and connect it to
 * the PHY described in the device tree, if any (-ENODEV from the connect
 * just means there is no PHY, which is fine for fixed links).
 */
static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(port_dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;
	dp->pl_config.pcs_poll = ds->pcs_poll;

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	/* -ENODEV means no PHY in the DT node, which is not an error here */
	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}
1046 
1047 int dsa_port_link_register_of(struct dsa_port *dp)
1048 {
1049 	struct dsa_switch *ds = dp->ds;
1050 	struct device_node *phy_np;
1051 	int port = dp->index;
1052 
1053 	if (!ds->ops->adjust_link) {
1054 		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
1055 		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
1056 			if (ds->ops->phylink_mac_link_down)
1057 				ds->ops->phylink_mac_link_down(ds, port,
1058 					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
1059 			return dsa_port_phylink_register(dp);
1060 		}
1061 		return 0;
1062 	}
1063 
1064 	dev_warn(ds->dev,
1065 		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
1066 
1067 	if (of_phy_is_fixed_link(dp->dn))
1068 		return dsa_port_fixed_link_register_of(dp);
1069 	else
1070 		return dsa_port_setup_phy_of(dp, true);
1071 }
1072 
/* Undo dsa_port_link_register_of(), taking the same branch it took:
 * phylink teardown for modern drivers, fixed-link deregistration or PHY
 * suspend for the legacy ones.
 */
void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}
1091 
1092 int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
1093 {
1094 	struct phy_device *phydev;
1095 	int ret = -EOPNOTSUPP;
1096 
1097 	if (of_phy_is_fixed_link(dp->dn))
1098 		return ret;
1099 
1100 	phydev = dsa_port_get_phy_device(dp);
1101 	if (IS_ERR_OR_NULL(phydev))
1102 		return ret;
1103 
1104 	ret = phy_ethtool_get_strings(phydev, data);
1105 	put_device(&phydev->mdio.dev);
1106 
1107 	return ret;
1108 }
1109 EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);
1110 
1111 int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
1112 {
1113 	struct phy_device *phydev;
1114 	int ret = -EOPNOTSUPP;
1115 
1116 	if (of_phy_is_fixed_link(dp->dn))
1117 		return ret;
1118 
1119 	phydev = dsa_port_get_phy_device(dp);
1120 	if (IS_ERR_OR_NULL(phydev))
1121 		return ret;
1122 
1123 	ret = phy_ethtool_get_stats(phydev, NULL, data);
1124 	put_device(&phydev->mdio.dev);
1125 
1126 	return ret;
1127 }
1128 EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);
1129 
1130 int dsa_port_get_phy_sset_count(struct dsa_port *dp)
1131 {
1132 	struct phy_device *phydev;
1133 	int ret = -EOPNOTSUPP;
1134 
1135 	if (of_phy_is_fixed_link(dp->dn))
1136 		return ret;
1137 
1138 	phydev = dsa_port_get_phy_device(dp);
1139 	if (IS_ERR_OR_NULL(phydev))
1140 		return ret;
1141 
1142 	ret = phy_ethtool_get_sset_count(phydev);
1143 	put_device(&phydev->mdio.dev);
1144 
1145 	return ret;
1146 }
1147 EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);
1148 
/* Join an HSR ring: cache the HSR device on the port, then notify the
 * fabric; the cached pointer is rolled back if the notifier fails.
 */
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.hsr = hsr,
	};
	int err;

	dp->hsr_dev = hsr;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
	if (err)
		dp->hsr_dev = NULL;

	return err;
}
1166 
/* Leave an HSR ring: clear the cached HSR device and notify the fabric
 * (best-effort; the port is already detached, nothing can be aborted).
 */
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.hsr = hsr,
	};
	int err;

	dp->hsr_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_HSR_LEAVE\n");
}
1182 
1183 int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid)
1184 {
1185 	struct dsa_notifier_tag_8021q_vlan_info info = {
1186 		.tree_index = dp->ds->dst->index,
1187 		.sw_index = dp->ds->index,
1188 		.port = dp->index,
1189 		.vid = vid,
1190 	};
1191 
1192 	return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
1193 }
1194 
/* Remove a tag_8021q VLAN from the whole tree (best-effort: removal paths
 * cannot bail out, so failures are only logged).
 */
void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vid = vid,
	};
	int err;

	err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	if (err)
		pr_err("DSA: failed to notify tag_8021q VLAN deletion: %pe\n",
		       ERR_PTR(err));
}
1210