// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}

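/* Push an STP state change down to the switch driver, flushing the port's
 * dynamically learned addresses when it transitions out of the Learning or
 * Forwarding state.
 */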
int dsa_port_set_state(struct dsa_port *dp, u8 state)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (ds->ops->port_fast_age) {
		/* Fast age FDB entries or flush the appropriate forwarding
		 * database for the given port, if we are moving it from the
		 * Learning or Forwarding state to the Disabled, Blocking or
		 * Listening state.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			ds->ops->port_fast_age(ds, port);
	}

	dp->stp_state = state;

	return 0;
}

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
{
	int err;

	err = dsa_port_set_state(dp, state);
	if (err)
		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}

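/* The "_rt" variants expect the caller to already hold the rtnl lock (e.g.
 * the slave netdev's ndo_open/ndo_stop paths); dsa_port_enable() and
 * dsa_port_disable() below take rtnl themselves for contexts such as CPU and
 * DSA port setup.
 */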
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

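/* Switch a port between the standalone and bridged set of bridge port flags:
 * bridged ports get learning and all flooding enabled, standalone ports keep
 * flooding but have learning turned off. Each flag is applied individually so
 * that a driver which rejects one flag does not prevent the others from being
 * programmed.
 */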
static void dsa_port_change_brport_flags(struct dsa_port *dp,
					 bool bridge_offload)
{
	struct switchdev_brport_flags flags;
	int flag;

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	if (bridge_offload)
		flags.val = flags.mask;
	else
		flags.val = flags.mask & ~BR_LEARNING;

	for_each_set_bit(flag, &flags.mask, 32) {
		struct switchdev_brport_flags tmp;

		tmp.val = flags.val & BIT(flag);
		tmp.mask = BIT(flag);

		dsa_port_bridge_flags(dp, tmp, NULL);
	}
}

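/* Called when the port's slave device is enslaved to a bridge: record the
 * bridge, switch the brport flags to their bridged defaults and let the whole
 * fabric know via DSA_NOTIFIER_BRIDGE_JOIN so that e.g. cross-chip setups can
 * adjust their DSA links. Everything is rolled back if the notifier fails.
 */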
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Notify the port driver to set its configurable flags in a way that
	 * matches the initial settings of a bridge port.
	 */
	dsa_port_change_brport_flags(dp, true);

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = br;

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);

	/* The bridging is rolled back on error */
	if (err) {
		dsa_port_change_brport_flags(dp, false);
		dp->bridge_dev = NULL;
	}

	return err;
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = NULL;

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");

	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. We actually prefer it that way, because
	 * otherwise some setups would never get the notification they need:
	 * for example, when a port leaves a LAG that offloads the bridge, it
	 * becomes standalone, but as far as the bridge is concerned, no port
	 * ever left.
	 */
	dsa_port_change_brport_flags(dp, false);

	/* The bridge layer put the port in BR_STATE_DISABLED when it left the
	 * bridge, so move it back to BR_STATE_FORWARDING to keep it functional
	 * as a standalone port.
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
}

int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	bool tx_enabled;

	if (!dp->lag_dev)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include the port in the Tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}

int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
		      struct netdev_lag_upper_info *uinfo)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
		.info = uinfo,
	};
	int err;

	dsa_lag_map(dp->ds->dst, lag);
	dp->lag_dev = lag;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err) {
		dp->lag_dev = NULL;
		dsa_lag_unmap(dp->ds->dst, lag);
	}

	return err;
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
	};
	int err;

	if (!dp->lag_dev)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (dp->bridge_dev)
		dsa_port_bridge_leave(dp, dp->bridge_dev);

	dp->lag_tx_enabled = false;
	dp->lag_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n",
		       err);

	dsa_lag_unmap(dp->ds->dst, lag);
}

/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err, i;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, and those need to go. Make sure we
	 * don't enter an inconsistent state: deny changing the VLAN awareness
	 * state as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
		struct net_device *upper_dev, *slave = dp->slave;
		struct net_device *br = dp->bridge_dev;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device or the VID, respectively, is not found.
			 * Returning 0 means success, which is a failure for us
			 * here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	for (i = 0; i < ds->num_ports; i++) {
		struct net_device *other_bridge;

		other_bridge = dsa_to_port(ds, i)->bridge_dev;
		if (!other_bridge)
			continue;
		/* If it's the same bridge, it also has the same
		 * vlan_filtering setting => no need to check.
		 */
		if (other_bridge == dp->bridge_dev)
			continue;
		if (br_vlan_enabled(other_bridge) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}

int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = vlan_filtering;
	else
		dp->vlan_filtering = vlan_filtering;

	return 0;
}

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!dp->bridge_dev)
		return false;

	return (!ds->configure_vlan_while_not_filtering &&
		!br_vlan_enabled(dp->bridge_dev));
}

int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;

	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;

	dp->ageing_time = ageing_time;

	return 0;
}

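/* The "pre" hook only checks whether the requested brport flag changes can be
 * offloaded by the driver (switchdev consults it before committing anything);
 * dsa_port_bridge_flags() below is what actually applies them.
 */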
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_bridge_flags(const struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_bridge_flags)
		return -EINVAL;

	return ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
		     struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_set_mrouter)
		return -EOPNOTSUPP;

	return ds->ops->port_set_mrouter(ds, dp->index, mrouter, extack);
}

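/* Notify the fabric of an MTU change on this port; propagate_upstream selects
 * whether the new MTU should also be applied to the upstream CPU and DSA
 * ports carrying this port's traffic.
 */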
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
			bool propagate_upstream)
{
	struct dsa_notifier_mtu_info info = {
		.sw_index = dp->ds->index,
		.propagate_upstream = propagate_upstream,
		.port = dp->index,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

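/* Static FDB entries (and, below, MDB, VLAN and MRP objects) are not written
 * to the hardware directly but broadcast as DSA_NOTIFIER_* events, so that
 * every switch in the tree can install whatever cross-chip state it needs.
 */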
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_notifier_mrp_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD, &info);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_notifier_mrp_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL, &info);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_notifier_mrp_ring_role_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD_RING_ROLE, &info);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_notifier_mrp_ring_role_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL_RING_ROLE, &info);
}

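/* Cache the tagging protocol's filter and receive hooks on the CPU port
 * itself; the receive path uses these copies rather than going through
 * tag_ops.
 */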
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->filter = tag_ops->filter;
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}

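/* Look up the PHY described by the port's "phy-handle" OF property. Returns
 * NULL if there is none, ERR_PTR(-EPROBE_DEFER) if it is described but not
 * bound yet, and otherwise a reference that the caller must drop with
 * put_device() on &phydev->mdio.dev.
 */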
static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate)
		return;

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_is_user_port(ds, dp->index))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

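/* Thin phylink-to-driver shims: each callback dispatches to the corresponding
 * ds->ops->phylink_* hook, falling back to the legacy adjust_link callback
 * for link up/down when the driver has no phylink support of its own.
 */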
const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};

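/* Legacy PHYLIB path for drivers still using adjust_link: resume (or suspend)
 * the PHY pointed to by "phy-handle" and let adjust_link program the MAC to
 * match its current state.
 */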
static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(port_dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;
	dp->pl_config.pcs_poll = ds->pcs_poll;

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}

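/* Set up link management for a CPU or DSA port described in the device tree.
 * Drivers without the legacy adjust_link callback get a phylink instance
 * (with the MAC first forced down so that phylink starts from a known state);
 * drivers still on adjust_link get a fixed link or bare PHY instead.
 */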
int dsa_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_np;
	int port = dp->index;

	if (!ds->ops->adjust_link) {
		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
			return dsa_port_phylink_register(dp);
		}
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_port_fixed_link_register_of(dp);
	else
		return dsa_port_setup_phy_of(dp, true);
}

void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}

int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_strings(phydev, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);

int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_stats(phydev, NULL, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);

int dsa_port_get_phy_sset_count(struct dsa_port *dp)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_sset_count(phydev);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);

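/* HSR/PRP ring offload: record the HSR device on the port and tell the fabric
 * about it; the membership is simply cleared again if the switch driver
 * cannot offload the ring.
 */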
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.hsr = hsr,
	};
	int err;

	dp->hsr_dev = hsr;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
	if (err)
		dp->hsr_dev = NULL;

	return err;
}

void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.hsr = hsr,
	};
	int err;

	dp->hsr_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_HSR_LEAVE\n");
}
991