xref: /openbmc/linux/net/dsa/slave.c (revision 3557b3fd)
1 /*
2  * net/dsa/slave.c - Slave device handling
3  * Copyright (c) 2008-2009 Marvell Semiconductor
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  */
10 
11 #include <linux/list.h>
12 #include <linux/etherdevice.h>
13 #include <linux/netdevice.h>
14 #include <linux/phy.h>
15 #include <linux/phy_fixed.h>
16 #include <linux/phylink.h>
17 #include <linux/of_net.h>
18 #include <linux/of_mdio.h>
19 #include <linux/mdio.h>
20 #include <net/rtnetlink.h>
21 #include <net/pkt_cls.h>
22 #include <net/tc_act/tc_mirred.h>
23 #include <linux/if_bridge.h>
24 #include <linux/netpoll.h>
25 #include <linux/ptp_classify.h>
26 
27 #include "dsa_priv.h"
28 
29 static bool dsa_slave_dev_check(struct net_device *dev);
30 
31 /* slave mii_bus handling ***************************************************/
32 static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
33 {
34 	struct dsa_switch *ds = bus->priv;
35 
36 	if (ds->phys_mii_mask & (1 << addr))
37 		return ds->ops->phy_read(ds, addr, reg);
38 
39 	return 0xffff;
40 }
41 
42 static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
43 {
44 	struct dsa_switch *ds = bus->priv;
45 
46 	if (ds->phys_mii_mask & (1 << addr))
47 		return ds->ops->phy_write(ds, addr, reg, val);
48 
49 	return 0;
50 }
51 
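/* Initialize the mii_bus that the DSA core registers on behalf of switch
 * drivers implementing ds->ops->phy_read/phy_write. Addresses outside
 * ds->phys_mii_mask are masked off via mii_bus->phy_mask so that phylib
 * only probes the switch's internal PHYs; reads from other addresses
 * return 0xffff (no device present) in dsa_slave_phy_read() above.
 */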
52 void dsa_slave_mii_bus_init(struct dsa_switch *ds)
53 {
54 	ds->slave_mii_bus->priv = (void *)ds;
55 	ds->slave_mii_bus->name = "dsa slave smi";
56 	ds->slave_mii_bus->read = dsa_slave_phy_read;
57 	ds->slave_mii_bus->write = dsa_slave_phy_write;
58 	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
59 		 ds->dst->index, ds->index);
60 	ds->slave_mii_bus->parent = ds->dev;
61 	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
62 }
63 
64 
65 /* slave device handling ****************************************************/
66 static int dsa_slave_get_iflink(const struct net_device *dev)
67 {
68 	return dsa_slave_to_master(dev)->ifindex;
69 }
70 
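/* Opening a slave interface requires the master (CPU) interface to be up.
 * If the slave's MAC address differs from the master's, it is added to the
 * master's unicast filter so the master's RX path accepts frames addressed
 * to the slave; IFF_ALLMULTI and IFF_PROMISC are propagated the same way.
 * The port is then enabled in hardware and phylink is started.
 */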
71 static int dsa_slave_open(struct net_device *dev)
72 {
73 	struct net_device *master = dsa_slave_to_master(dev);
74 	struct dsa_port *dp = dsa_slave_to_port(dev);
75 	int err;
76 
77 	if (!(master->flags & IFF_UP))
78 		return -ENETDOWN;
79 
80 	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
81 		err = dev_uc_add(master, dev->dev_addr);
82 		if (err < 0)
83 			goto out;
84 	}
85 
86 	if (dev->flags & IFF_ALLMULTI) {
87 		err = dev_set_allmulti(master, 1);
88 		if (err < 0)
89 			goto del_unicast;
90 	}
91 	if (dev->flags & IFF_PROMISC) {
92 		err = dev_set_promiscuity(master, 1);
93 		if (err < 0)
94 			goto clear_allmulti;
95 	}
96 
97 	err = dsa_port_enable(dp, dev->phydev);
98 	if (err)
99 		goto clear_promisc;
100 
101 	phylink_start(dp->pl);
102 
103 	return 0;
104 
105 clear_promisc:
106 	if (dev->flags & IFF_PROMISC)
107 		dev_set_promiscuity(master, -1);
108 clear_allmulti:
109 	if (dev->flags & IFF_ALLMULTI)
110 		dev_set_allmulti(master, -1);
111 del_unicast:
112 	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
113 		dev_uc_del(master, dev->dev_addr);
114 out:
115 	return err;
116 }
117 
118 static int dsa_slave_close(struct net_device *dev)
119 {
120 	struct net_device *master = dsa_slave_to_master(dev);
121 	struct dsa_port *dp = dsa_slave_to_port(dev);
122 
123 	phylink_stop(dp->pl);
124 
125 	dsa_port_disable(dp);
126 
127 	dev_mc_unsync(master, dev);
128 	dev_uc_unsync(master, dev);
129 	if (dev->flags & IFF_ALLMULTI)
130 		dev_set_allmulti(master, -1);
131 	if (dev->flags & IFF_PROMISC)
132 		dev_set_promiscuity(master, -1);
133 
134 	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
135 		dev_uc_del(master, dev->dev_addr);
136 
137 	return 0;
138 }
139 
140 static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
141 {
142 	struct net_device *master = dsa_slave_to_master(dev);
143 	if (dev->flags & IFF_UP) {
144 		if (change & IFF_ALLMULTI)
145 			dev_set_allmulti(master,
146 					 dev->flags & IFF_ALLMULTI ? 1 : -1);
147 		if (change & IFF_PROMISC)
148 			dev_set_promiscuity(master,
149 					    dev->flags & IFF_PROMISC ? 1 : -1);
150 	}
151 }
152 
153 static void dsa_slave_set_rx_mode(struct net_device *dev)
154 {
155 	struct net_device *master = dsa_slave_to_master(dev);
156 
157 	dev_mc_sync(master, dev);
158 	dev_uc_sync(master, dev);
159 }
160 
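/* Changing the MAC address while the interface is up mirrors the open/close
 * logic above: the new address is added to the master's unicast filter
 * before the old one is removed (add-before-delete), and addresses equal to
 * the master's own are never added or removed. When the interface is down,
 * only dev->dev_addr is updated; the filters are set up again at open time.
 */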
161 static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
162 {
163 	struct net_device *master = dsa_slave_to_master(dev);
164 	struct sockaddr *addr = a;
165 	int err;
166 
167 	if (!is_valid_ether_addr(addr->sa_data))
168 		return -EADDRNOTAVAIL;
169 
170 	if (!(dev->flags & IFF_UP))
171 		goto out;
172 
173 	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
174 		err = dev_uc_add(master, addr->sa_data);
175 		if (err < 0)
176 			return err;
177 	}
178 
179 	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
180 		dev_uc_del(master, dev->dev_addr);
181 
182 out:
183 	ether_addr_copy(dev->dev_addr, addr->sa_data);
184 
185 	return 0;
186 }
187 
188 struct dsa_slave_dump_ctx {
189 	struct net_device *dev;
190 	struct sk_buff *skb;
191 	struct netlink_callback *cb;
192 	int idx;
193 };
194 
195 static int
196 dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
197 			   bool is_static, void *data)
198 {
199 	struct dsa_slave_dump_ctx *dump = data;
200 	u32 portid = NETLINK_CB(dump->cb->skb).portid;
201 	u32 seq = dump->cb->nlh->nlmsg_seq;
202 	struct nlmsghdr *nlh;
203 	struct ndmsg *ndm;
204 
205 	if (dump->idx < dump->cb->args[2])
206 		goto skip;
207 
208 	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
209 			sizeof(*ndm), NLM_F_MULTI);
210 	if (!nlh)
211 		return -EMSGSIZE;
212 
213 	ndm = nlmsg_data(nlh);
214 	ndm->ndm_family  = AF_BRIDGE;
215 	ndm->ndm_pad1    = 0;
216 	ndm->ndm_pad2    = 0;
217 	ndm->ndm_flags   = NTF_SELF;
218 	ndm->ndm_type    = 0;
219 	ndm->ndm_ifindex = dump->dev->ifindex;
220 	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;
221 
222 	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
223 		goto nla_put_failure;
224 
225 	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
226 		goto nla_put_failure;
227 
228 	nlmsg_end(dump->skb, nlh);
229 
230 skip:
231 	dump->idx++;
232 	return 0;
233 
234 nla_put_failure:
235 	nlmsg_cancel(dump->skb, nlh);
236 	return -EMSGSIZE;
237 }
238 
239 static int
240 dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
241 		   struct net_device *dev, struct net_device *filter_dev,
242 		   int *idx)
243 {
244 	struct dsa_port *dp = dsa_slave_to_port(dev);
245 	struct dsa_slave_dump_ctx dump = {
246 		.dev = dev,
247 		.skb = skb,
248 		.cb = cb,
249 		.idx = *idx,
250 	};
251 	int err;
252 
253 	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
254 	*idx = dump.idx;
255 
256 	return err;
257 }
258 
259 static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
260 {
261 	struct dsa_slave_priv *p = netdev_priv(dev);
262 	struct dsa_switch *ds = p->dp->ds;
263 	int port = p->dp->index;
264 
265 	/* Pass through to switch driver if it supports timestamping */
266 	switch (cmd) {
267 	case SIOCGHWTSTAMP:
268 		if (ds->ops->port_hwtstamp_get)
269 			return ds->ops->port_hwtstamp_get(ds, port, ifr);
270 		break;
271 	case SIOCSHWTSTAMP:
272 		if (ds->ops->port_hwtstamp_set)
273 			return ds->ops->port_hwtstamp_set(ds, port, ifr);
274 		break;
275 	}
276 
277 	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
278 }
279 
280 static int dsa_slave_port_attr_set(struct net_device *dev,
281 				   const struct switchdev_attr *attr,
282 				   struct switchdev_trans *trans)
283 {
284 	struct dsa_port *dp = dsa_slave_to_port(dev);
285 	int ret;
286 
287 	switch (attr->id) {
288 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
289 		ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
290 		break;
291 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
292 		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
293 					      trans);
294 		break;
295 	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
296 		ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
297 		break;
298 	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
299 		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
300 						trans);
301 		break;
302 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
303 		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, trans);
304 		break;
305 	default:
306 		ret = -EOPNOTSUPP;
307 		break;
308 	}
309 
310 	return ret;
311 }
312 
313 static int dsa_slave_port_obj_add(struct net_device *dev,
314 				  const struct switchdev_obj *obj,
315 				  struct switchdev_trans *trans)
316 {
317 	struct dsa_port *dp = dsa_slave_to_port(dev);
318 	int err;
319 
320 	/* For the prepare phase, ensure the full set of changes is feasible in
321 	 * one go in order to signal a failure properly. If an operation is not
322 	 * supported, return -EOPNOTSUPP.
323 	 */
324 
325 	switch (obj->id) {
326 	case SWITCHDEV_OBJ_ID_PORT_MDB:
327 		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
328 		break;
329 	case SWITCHDEV_OBJ_ID_HOST_MDB:
330 		/* DSA can directly translate this to a normal MDB add,
331 		 * but on the CPU port.
332 		 */
333 		err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj),
334 				       trans);
335 		break;
336 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
337 		err = dsa_port_vlan_add(dp, SWITCHDEV_OBJ_PORT_VLAN(obj),
338 					trans);
339 		break;
340 	default:
341 		err = -EOPNOTSUPP;
342 		break;
343 	}
344 
345 	return err;
346 }
347 
348 static int dsa_slave_port_obj_del(struct net_device *dev,
349 				  const struct switchdev_obj *obj)
350 {
351 	struct dsa_port *dp = dsa_slave_to_port(dev);
352 	int err;
353 
354 	switch (obj->id) {
355 	case SWITCHDEV_OBJ_ID_PORT_MDB:
356 		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
357 		break;
358 	case SWITCHDEV_OBJ_ID_HOST_MDB:
359 		/* DSA can directly translate this to a normal MDB delete,
360 		 * but on the CPU port.
361 		 */
362 		err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
363 		break;
364 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
365 		err = dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
366 		break;
367 	default:
368 		err = -EOPNOTSUPP;
369 		break;
370 	}
371 
372 	return err;
373 }
374 
375 static int dsa_slave_get_port_parent_id(struct net_device *dev,
376 					struct netdev_phys_item_id *ppid)
377 {
378 	struct dsa_port *dp = dsa_slave_to_port(dev);
379 	struct dsa_switch *ds = dp->ds;
380 	struct dsa_switch_tree *dst = ds->dst;
381 
382 	/* For non-legacy ports, devlink is used and it takes
383 	 * care of providing this information. This ndo implementation
384 	 * should be removed with legacy support.
385 	 */
386 	if (dp->ds->devlink)
387 		return -EOPNOTSUPP;
388 
389 	ppid->id_len = sizeof(dst->index);
390 	memcpy(&ppid->id, &dst->index, ppid->id_len);
391 
392 	return 0;
393 }
394 
395 static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
396 						     struct sk_buff *skb)
397 {
398 #ifdef CONFIG_NET_POLL_CONTROLLER
399 	struct dsa_slave_priv *p = netdev_priv(dev);
400 
401 	if (p->netpoll)
402 		netpoll_send_skb(p->netpoll, skb);
403 #else
404 	BUG();
405 #endif
406 	return NETDEV_TX_OK;
407 }
408 
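/* TX timestamping helper: classify the skb as PTP and, if the switch driver
 * implements port_txtstamp, hand it a clone that still references the
 * originating socket (skb_clone_sk). If the driver declines the clone
 * (returns false), it is freed here; otherwise the driver keeps it,
 * typically completing the timestamp later via skb_complete_tx_timestamp().
 */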
409 static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
410 				 struct sk_buff *skb)
411 {
412 	struct dsa_switch *ds = p->dp->ds;
413 	struct sk_buff *clone;
414 	unsigned int type;
415 
416 	type = ptp_classify_raw(skb);
417 	if (type == PTP_CLASS_NONE)
418 		return;
419 
420 	if (!ds->ops->port_txtstamp)
421 		return;
422 
423 	clone = skb_clone_sk(skb);
424 	if (!clone)
425 		return;
426 
427 	if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type))
428 		return;
429 
430 	kfree_skb(clone);
431 }
432 
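/* Slave transmit path: account the packet in the per-CPU software counters,
 * request a TX timestamp where applicable, then let the tagging protocol's
 * xmit() op (p->xmit) insert the switch tag. The tagged skb is handed to
 * the master interface for actual transmission, with a netpoll special case
 * handled separately.
 */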
433 static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
434 {
435 	struct dsa_slave_priv *p = netdev_priv(dev);
436 	struct pcpu_sw_netstats *s;
437 	struct sk_buff *nskb;
438 
439 	s = this_cpu_ptr(p->stats64);
440 	u64_stats_update_begin(&s->syncp);
441 	s->tx_packets++;
442 	s->tx_bytes += skb->len;
443 	u64_stats_update_end(&s->syncp);
444 
445 	/* Identify PTP protocol packets, clone them, and pass them to the
446 	 * switch driver
447 	 */
448 	dsa_skb_tx_timestamp(p, skb);
449 
450 	/* The transmit function may have to reallocate the original SKB,
451 	 * in which case it must have freed it. Only free it here on error.
452 	 */
453 	nskb = p->xmit(skb, dev);
454 	if (!nskb) {
455 		kfree_skb(skb);
456 		return NETDEV_TX_OK;
457 	}
458 
459 	/* The SKB for netpoll still needs to be mangled with the protocol-specific
460 	 * tag to be successfully transmitted
461 	 */
462 	if (unlikely(netpoll_tx_running(dev)))
463 		return dsa_slave_netpoll_send_skb(dev, nskb);
464 
465 	/* Queue the SKB for transmission on the parent interface, but
466 	 * do not modify its EtherType
467 	 */
468 	nskb->dev = dsa_slave_to_master(dev);
469 	dev_queue_xmit(nskb);
470 
471 	return NETDEV_TX_OK;
472 }
473 
474 /* ethtool operations *******************************************************/
475 
476 static void dsa_slave_get_drvinfo(struct net_device *dev,
477 				  struct ethtool_drvinfo *drvinfo)
478 {
479 	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
480 	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
481 	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
482 }
483 
484 static int dsa_slave_get_regs_len(struct net_device *dev)
485 {
486 	struct dsa_port *dp = dsa_slave_to_port(dev);
487 	struct dsa_switch *ds = dp->ds;
488 
489 	if (ds->ops->get_regs_len)
490 		return ds->ops->get_regs_len(ds, dp->index);
491 
492 	return -EOPNOTSUPP;
493 }
494 
495 static void
496 dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
497 {
498 	struct dsa_port *dp = dsa_slave_to_port(dev);
499 	struct dsa_switch *ds = dp->ds;
500 
501 	if (ds->ops->get_regs)
502 		ds->ops->get_regs(ds, dp->index, regs, _p);
503 }
504 
505 static int dsa_slave_nway_reset(struct net_device *dev)
506 {
507 	struct dsa_port *dp = dsa_slave_to_port(dev);
508 
509 	return phylink_ethtool_nway_reset(dp->pl);
510 }
511 
512 static int dsa_slave_get_eeprom_len(struct net_device *dev)
513 {
514 	struct dsa_port *dp = dsa_slave_to_port(dev);
515 	struct dsa_switch *ds = dp->ds;
516 
517 	if (ds->cd && ds->cd->eeprom_len)
518 		return ds->cd->eeprom_len;
519 
520 	if (ds->ops->get_eeprom_len)
521 		return ds->ops->get_eeprom_len(ds);
522 
523 	return 0;
524 }
525 
526 static int dsa_slave_get_eeprom(struct net_device *dev,
527 				struct ethtool_eeprom *eeprom, u8 *data)
528 {
529 	struct dsa_port *dp = dsa_slave_to_port(dev);
530 	struct dsa_switch *ds = dp->ds;
531 
532 	if (ds->ops->get_eeprom)
533 		return ds->ops->get_eeprom(ds, eeprom, data);
534 
535 	return -EOPNOTSUPP;
536 }
537 
538 static int dsa_slave_set_eeprom(struct net_device *dev,
539 				struct ethtool_eeprom *eeprom, u8 *data)
540 {
541 	struct dsa_port *dp = dsa_slave_to_port(dev);
542 	struct dsa_switch *ds = dp->ds;
543 
544 	if (ds->ops->set_eeprom)
545 		return ds->ops->set_eeprom(ds, eeprom, data);
546 
547 	return -EOPNOTSUPP;
548 }
549 
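/* ethtool statistics layout: the first four slots are the software counters
 * kept in dsa_slave_priv ("tx_packets", "tx_bytes", "rx_packets",
 * "rx_bytes"); anything the switch driver reports through get_strings() and
 * get_ethtool_stats() follows at offset 4, so a driver's first hardware
 * counter ends up in data[4] with its name in string slot 4.
 */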
550 static void dsa_slave_get_strings(struct net_device *dev,
551 				  uint32_t stringset, uint8_t *data)
552 {
553 	struct dsa_port *dp = dsa_slave_to_port(dev);
554 	struct dsa_switch *ds = dp->ds;
555 
556 	if (stringset == ETH_SS_STATS) {
557 		int len = ETH_GSTRING_LEN;
558 
559 		strncpy(data, "tx_packets", len);
560 		strncpy(data + len, "tx_bytes", len);
561 		strncpy(data + 2 * len, "rx_packets", len);
562 		strncpy(data + 3 * len, "rx_bytes", len);
563 		if (ds->ops->get_strings)
564 			ds->ops->get_strings(ds, dp->index, stringset,
565 					     data + 4 * len);
566 	}
567 }
568 
569 static void dsa_slave_get_ethtool_stats(struct net_device *dev,
570 					struct ethtool_stats *stats,
571 					uint64_t *data)
572 {
573 	struct dsa_port *dp = dsa_slave_to_port(dev);
574 	struct dsa_slave_priv *p = netdev_priv(dev);
575 	struct dsa_switch *ds = dp->ds;
576 	struct pcpu_sw_netstats *s;
577 	unsigned int start;
578 	int i;
579 
580 	for_each_possible_cpu(i) {
581 		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
582 
583 		s = per_cpu_ptr(p->stats64, i);
584 		do {
585 			start = u64_stats_fetch_begin_irq(&s->syncp);
586 			tx_packets = s->tx_packets;
587 			tx_bytes = s->tx_bytes;
588 			rx_packets = s->rx_packets;
589 			rx_bytes = s->rx_bytes;
590 		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
591 		data[0] += tx_packets;
592 		data[1] += tx_bytes;
593 		data[2] += rx_packets;
594 		data[3] += rx_bytes;
595 	}
596 	if (ds->ops->get_ethtool_stats)
597 		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
598 }
599 
600 static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
601 {
602 	struct dsa_port *dp = dsa_slave_to_port(dev);
603 	struct dsa_switch *ds = dp->ds;
604 
605 	if (sset == ETH_SS_STATS) {
606 		int count;
607 
608 		count = 4;
609 		if (ds->ops->get_sset_count)
610 			count += ds->ops->get_sset_count(ds, dp->index, sset);
611 
612 		return count;
613 	}
614 
615 	return -EOPNOTSUPP;
616 }
617 
618 static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
619 {
620 	struct dsa_port *dp = dsa_slave_to_port(dev);
621 	struct dsa_switch *ds = dp->ds;
622 
623 	phylink_ethtool_get_wol(dp->pl, w);
624 
625 	if (ds->ops->get_wol)
626 		ds->ops->get_wol(ds, dp->index, w);
627 }
628 
629 static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
630 {
631 	struct dsa_port *dp = dsa_slave_to_port(dev);
632 	struct dsa_switch *ds = dp->ds;
633 	int ret = -EOPNOTSUPP;
634 
635 	phylink_ethtool_set_wol(dp->pl, w);
636 
637 	if (ds->ops->set_wol)
638 		ret = ds->ops->set_wol(ds, dp->index, w);
639 
640 	return ret;
641 }
642 
643 static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
644 {
645 	struct dsa_port *dp = dsa_slave_to_port(dev);
646 	struct dsa_switch *ds = dp->ds;
647 	int ret;
648 
649 	/* Port's PHY and MAC both need to be EEE capable */
650 	if (!dev->phydev || !dp->pl)
651 		return -ENODEV;
652 
653 	if (!ds->ops->set_mac_eee)
654 		return -EOPNOTSUPP;
655 
656 	ret = ds->ops->set_mac_eee(ds, dp->index, e);
657 	if (ret)
658 		return ret;
659 
660 	return phylink_ethtool_set_eee(dp->pl, e);
661 }
662 
663 static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
664 {
665 	struct dsa_port *dp = dsa_slave_to_port(dev);
666 	struct dsa_switch *ds = dp->ds;
667 	int ret;
668 
669 	/* Port's PHY and MAC both need to be EEE capable */
670 	if (!dev->phydev || !dp->pl)
671 		return -ENODEV;
672 
673 	if (!ds->ops->get_mac_eee)
674 		return -EOPNOTSUPP;
675 
676 	ret = ds->ops->get_mac_eee(ds, dp->index, e);
677 	if (ret)
678 		return ret;
679 
680 	return phylink_ethtool_get_eee(dp->pl, e);
681 }
682 
683 static int dsa_slave_get_link_ksettings(struct net_device *dev,
684 					struct ethtool_link_ksettings *cmd)
685 {
686 	struct dsa_port *dp = dsa_slave_to_port(dev);
687 
688 	return phylink_ethtool_ksettings_get(dp->pl, cmd);
689 }
690 
691 static int dsa_slave_set_link_ksettings(struct net_device *dev,
692 					const struct ethtool_link_ksettings *cmd)
693 {
694 	struct dsa_port *dp = dsa_slave_to_port(dev);
695 
696 	return phylink_ethtool_ksettings_set(dp->pl, cmd);
697 }
698 
699 #ifdef CONFIG_NET_POLL_CONTROLLER
700 static int dsa_slave_netpoll_setup(struct net_device *dev,
701 				   struct netpoll_info *ni)
702 {
703 	struct net_device *master = dsa_slave_to_master(dev);
704 	struct dsa_slave_priv *p = netdev_priv(dev);
705 	struct netpoll *netpoll;
706 	int err = 0;
707 
708 	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
709 	if (!netpoll)
710 		return -ENOMEM;
711 
712 	err = __netpoll_setup(netpoll, master);
713 	if (err) {
714 		kfree(netpoll);
715 		goto out;
716 	}
717 
718 	p->netpoll = netpoll;
719 out:
720 	return err;
721 }
722 
723 static void dsa_slave_netpoll_cleanup(struct net_device *dev)
724 {
725 	struct dsa_slave_priv *p = netdev_priv(dev);
726 	struct netpoll *netpoll = p->netpoll;
727 
728 	if (!netpoll)
729 		return;
730 
731 	p->netpoll = NULL;
732 
733 	__netpoll_free(netpoll);
734 }
735 
736 static void dsa_slave_poll_controller(struct net_device *dev)
737 {
738 }
739 #endif
740 
741 static int dsa_slave_get_phys_port_name(struct net_device *dev,
742 					char *name, size_t len)
743 {
744 	struct dsa_port *dp = dsa_slave_to_port(dev);
745 
746 	/* For non-legacy ports, devlink is used and it takes
747 	 * care of the name generation. This ndo implementation
748 	 * should be removed with legacy support.
749 	 */
750 	if (dp->ds->devlink)
751 		return -EOPNOTSUPP;
752 
753 	if (snprintf(name, len, "p%d", dp->index) >= len)
754 		return -EINVAL;
755 
756 	return 0;
757 }
758 
759 static struct dsa_mall_tc_entry *
760 dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
761 {
762 	struct dsa_slave_priv *p = netdev_priv(dev);
763 	struct dsa_mall_tc_entry *mall_tc_entry;
764 
765 	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
766 		if (mall_tc_entry->cookie == cookie)
767 			return mall_tc_entry;
768 
769 	return NULL;
770 }
771 
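/* tc matchall offload: only a single mirred egress-mirror action matching
 * protocol "all" is supported, and only if the switch driver provides
 * port_mirror_add() and the mirror target is itself a DSA slave interface.
 * The entry is remembered on a per-port list keyed by the tc cookie so it
 * can be torn down again in dsa_slave_del_cls_matchall().
 */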
772 static int dsa_slave_add_cls_matchall(struct net_device *dev,
773 				      struct tc_cls_matchall_offload *cls,
774 				      bool ingress)
775 {
776 	struct dsa_port *dp = dsa_slave_to_port(dev);
777 	struct dsa_slave_priv *p = netdev_priv(dev);
778 	struct dsa_mall_tc_entry *mall_tc_entry;
779 	__be16 protocol = cls->common.protocol;
780 	struct dsa_switch *ds = dp->ds;
781 	struct net_device *to_dev;
782 	const struct tc_action *a;
783 	struct dsa_port *to_dp;
784 	int err = -EOPNOTSUPP;
785 
786 	if (!ds->ops->port_mirror_add)
787 		return err;
788 
789 	if (!tcf_exts_has_one_action(cls->exts))
790 		return err;
791 
792 	a = tcf_exts_first_action(cls->exts);
793 
794 	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
795 		struct dsa_mall_mirror_tc_entry *mirror;
796 
797 		to_dev = tcf_mirred_dev(a);
798 		if (!to_dev)
799 			return -EINVAL;
800 
801 		if (!dsa_slave_dev_check(to_dev))
802 			return -EOPNOTSUPP;
803 
804 		mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
805 		if (!mall_tc_entry)
806 			return -ENOMEM;
807 
808 		mall_tc_entry->cookie = cls->cookie;
809 		mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
810 		mirror = &mall_tc_entry->mirror;
811 
812 		to_dp = dsa_slave_to_port(to_dev);
813 
814 		mirror->to_local_port = to_dp->index;
815 		mirror->ingress = ingress;
816 
817 		err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
818 		if (err) {
819 			kfree(mall_tc_entry);
820 			return err;
821 		}
822 
823 		list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
824 	}
825 
826 	return 0;
827 }
828 
829 static void dsa_slave_del_cls_matchall(struct net_device *dev,
830 				       struct tc_cls_matchall_offload *cls)
831 {
832 	struct dsa_port *dp = dsa_slave_to_port(dev);
833 	struct dsa_mall_tc_entry *mall_tc_entry;
834 	struct dsa_switch *ds = dp->ds;
835 
836 	if (!ds->ops->port_mirror_del)
837 		return;
838 
839 	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
840 	if (!mall_tc_entry)
841 		return;
842 
843 	list_del(&mall_tc_entry->list);
844 
845 	switch (mall_tc_entry->type) {
846 	case DSA_PORT_MALL_MIRROR:
847 		ds->ops->port_mirror_del(ds, dp->index, &mall_tc_entry->mirror);
848 		break;
849 	default:
850 		WARN_ON(1);
851 	}
852 
853 	kfree(mall_tc_entry);
854 }
855 
856 static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
857 					   struct tc_cls_matchall_offload *cls,
858 					   bool ingress)
859 {
860 	if (cls->common.chain_index)
861 		return -EOPNOTSUPP;
862 
863 	switch (cls->command) {
864 	case TC_CLSMATCHALL_REPLACE:
865 		return dsa_slave_add_cls_matchall(dev, cls, ingress);
866 	case TC_CLSMATCHALL_DESTROY:
867 		dsa_slave_del_cls_matchall(dev, cls);
868 		return 0;
869 	default:
870 		return -EOPNOTSUPP;
871 	}
872 }
873 
874 static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
875 				       void *cb_priv, bool ingress)
876 {
877 	struct net_device *dev = cb_priv;
878 
879 	if (!tc_can_offload(dev))
880 		return -EOPNOTSUPP;
881 
882 	switch (type) {
883 	case TC_SETUP_CLSMATCHALL:
884 		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
885 	default:
886 		return -EOPNOTSUPP;
887 	}
888 }
889 
890 static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
891 					  void *type_data, void *cb_priv)
892 {
893 	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
894 }
895 
896 static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
897 					  void *type_data, void *cb_priv)
898 {
899 	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
900 }
901 
902 static int dsa_slave_setup_tc_block(struct net_device *dev,
903 				    struct tc_block_offload *f)
904 {
905 	tc_setup_cb_t *cb;
906 
907 	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
908 		cb = dsa_slave_setup_tc_block_cb_ig;
909 	else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
910 		cb = dsa_slave_setup_tc_block_cb_eg;
911 	else
912 		return -EOPNOTSUPP;
913 
914 	switch (f->command) {
915 	case TC_BLOCK_BIND:
916 		return tcf_block_cb_register(f->block, cb, dev, dev, f->extack);
917 	case TC_BLOCK_UNBIND:
918 		tcf_block_cb_unregister(f->block, cb, dev);
919 		return 0;
920 	default:
921 		return -EOPNOTSUPP;
922 	}
923 }
924 
925 static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
926 			      void *type_data)
927 {
928 	switch (type) {
929 	case TC_SETUP_BLOCK:
930 		return dsa_slave_setup_tc_block(dev, type_data);
931 	default:
932 		return -EOPNOTSUPP;
933 	}
934 }
935 
936 static void dsa_slave_get_stats64(struct net_device *dev,
937 				  struct rtnl_link_stats64 *stats)
938 {
939 	struct dsa_slave_priv *p = netdev_priv(dev);
940 	struct pcpu_sw_netstats *s;
941 	unsigned int start;
942 	int i;
943 
944 	netdev_stats_to_stats64(stats, &dev->stats);
945 	for_each_possible_cpu(i) {
946 		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
947 
948 		s = per_cpu_ptr(p->stats64, i);
949 		do {
950 			start = u64_stats_fetch_begin_irq(&s->syncp);
951 			tx_packets = s->tx_packets;
952 			tx_bytes = s->tx_bytes;
953 			rx_packets = s->rx_packets;
954 			rx_bytes = s->rx_bytes;
955 		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
956 
957 		stats->tx_packets += tx_packets;
958 		stats->tx_bytes += tx_bytes;
959 		stats->rx_packets += rx_packets;
960 		stats->rx_bytes += rx_bytes;
961 	}
962 }
963 
964 static int dsa_slave_get_rxnfc(struct net_device *dev,
965 			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
966 {
967 	struct dsa_port *dp = dsa_slave_to_port(dev);
968 	struct dsa_switch *ds = dp->ds;
969 
970 	if (!ds->ops->get_rxnfc)
971 		return -EOPNOTSUPP;
972 
973 	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
974 }
975 
976 static int dsa_slave_set_rxnfc(struct net_device *dev,
977 			       struct ethtool_rxnfc *nfc)
978 {
979 	struct dsa_port *dp = dsa_slave_to_port(dev);
980 	struct dsa_switch *ds = dp->ds;
981 
982 	if (!ds->ops->set_rxnfc)
983 		return -EOPNOTSUPP;
984 
985 	return ds->ops->set_rxnfc(ds, dp->index, nfc);
986 }
987 
988 static int dsa_slave_get_ts_info(struct net_device *dev,
989 				 struct ethtool_ts_info *ts)
990 {
991 	struct dsa_slave_priv *p = netdev_priv(dev);
992 	struct dsa_switch *ds = p->dp->ds;
993 
994 	if (!ds->ops->get_ts_info)
995 		return -EOPNOTSUPP;
996 
997 	return ds->ops->get_ts_info(ds, p->dp->index, ts);
998 }
999 
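/* NETIF_F_HW_VLAN_CTAG_FILTER handler, called e.g. when an 8021q upper
 * (such as a hypothetical lan0.100) is created on the slave. VIDs already
 * configured on the bridge this port belongs to are refused with -EBUSY to
 * avoid clashing with bridge-managed VLANs. Otherwise the switchdev
 * prepare + commit sequence is emulated by calling dsa_port_vlan_add()
 * twice, with -EOPNOTSUPP from the prepare phase treated as success so
 * that software-only VLANs keep working.
 */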
1000 static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
1001 				     u16 vid)
1002 {
1003 	struct dsa_port *dp = dsa_slave_to_port(dev);
1004 	struct switchdev_obj_port_vlan vlan = {
1005 		.vid_begin = vid,
1006 		.vid_end = vid,
1007 		/* This API only allows programming tagged, non-PVID VIDs */
1008 		.flags = 0,
1009 	};
1010 	struct switchdev_trans trans;
1011 	struct bridge_vlan_info info;
1012 	int ret;
1013 
1014 	/* Check for a possible bridge VLAN entry now since there is no
1015 	 * need to emulate the switchdev prepare + commit phase.
1016 	 */
1017 	if (dp->bridge_dev) {
1018 		/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
1019 		 * device or the VID is not found; a return value of 0 means
1020 		 * the VID exists on the bridge, which is a failure for us here.
1021 		 */
1022 		ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
1023 		if (ret == 0)
1024 			return -EBUSY;
1025 	}
1026 
1027 	trans.ph_prepare = true;
1028 	ret = dsa_port_vlan_add(dp, &vlan, &trans);
1029 	if (ret == -EOPNOTSUPP)
1030 		return 0;
1031 
1032 	trans.ph_prepare = false;
1033 	return dsa_port_vlan_add(dp, &vlan, &trans);
1034 }
1035 
1036 static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
1037 				      u16 vid)
1038 {
1039 	struct dsa_port *dp = dsa_slave_to_port(dev);
1040 	struct switchdev_obj_port_vlan vlan = {
1041 		.vid_begin = vid,
1042 		.vid_end = vid,
1043 		/* This API only allows programming tagged, non-PVID VIDs */
1044 		.flags = 0,
1045 	};
1046 	struct bridge_vlan_info info;
1047 	int ret;
1048 
1049 	/* Check for a possible bridge VLAN entry now since there is no
1050 	 * need to emulate the switchdev prepare + commit phase.
1051 	 */
1052 	if (dp->bridge_dev) {
1053 		/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
1054 		 * device or the VID is not found; a return value of 0 means
1055 		 * the VID exists on the bridge, which is a failure for us here.
1056 		 */
1057 		ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
1058 		if (ret == 0)
1059 			return -EBUSY;
1060 	}
1061 
1062 	ret = dsa_port_vlan_del(dp, &vlan);
1063 	if (ret == -EOPNOTSUPP)
1064 		ret = 0;
1065 
1066 	return ret;
1067 }
1068 
1069 static const struct ethtool_ops dsa_slave_ethtool_ops = {
1070 	.get_drvinfo		= dsa_slave_get_drvinfo,
1071 	.get_regs_len		= dsa_slave_get_regs_len,
1072 	.get_regs		= dsa_slave_get_regs,
1073 	.nway_reset		= dsa_slave_nway_reset,
1074 	.get_link		= ethtool_op_get_link,
1075 	.get_eeprom_len		= dsa_slave_get_eeprom_len,
1076 	.get_eeprom		= dsa_slave_get_eeprom,
1077 	.set_eeprom		= dsa_slave_set_eeprom,
1078 	.get_strings		= dsa_slave_get_strings,
1079 	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
1080 	.get_sset_count		= dsa_slave_get_sset_count,
1081 	.set_wol		= dsa_slave_set_wol,
1082 	.get_wol		= dsa_slave_get_wol,
1083 	.set_eee		= dsa_slave_set_eee,
1084 	.get_eee		= dsa_slave_get_eee,
1085 	.get_link_ksettings	= dsa_slave_get_link_ksettings,
1086 	.set_link_ksettings	= dsa_slave_set_link_ksettings,
1087 	.get_rxnfc		= dsa_slave_get_rxnfc,
1088 	.set_rxnfc		= dsa_slave_set_rxnfc,
1089 	.get_ts_info		= dsa_slave_get_ts_info,
1090 };
1091 
1092 /* legacy way, bypassing the bridge *****************************************/
1093 int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
1094 		       struct net_device *dev,
1095 		       const unsigned char *addr, u16 vid,
1096 		       u16 flags,
1097 		       struct netlink_ext_ack *extack)
1098 {
1099 	struct dsa_port *dp = dsa_slave_to_port(dev);
1100 
1101 	return dsa_port_fdb_add(dp, addr, vid);
1102 }
1103 
1104 int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
1105 		       struct net_device *dev,
1106 		       const unsigned char *addr, u16 vid)
1107 {
1108 	struct dsa_port *dp = dsa_slave_to_port(dev);
1109 
1110 	return dsa_port_fdb_del(dp, addr, vid);
1111 }
1112 
1113 static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
1114 {
1115 	struct dsa_port *dp = dsa_slave_to_port(dev);
1116 
1117 	return dp->ds->devlink ? &dp->devlink_port : NULL;
1118 }
1119 
1120 static const struct net_device_ops dsa_slave_netdev_ops = {
1121 	.ndo_open	 	= dsa_slave_open,
1122 	.ndo_stop		= dsa_slave_close,
1123 	.ndo_start_xmit		= dsa_slave_xmit,
1124 	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
1125 	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
1126 	.ndo_set_mac_address	= dsa_slave_set_mac_address,
1127 	.ndo_fdb_add		= dsa_legacy_fdb_add,
1128 	.ndo_fdb_del		= dsa_legacy_fdb_del,
1129 	.ndo_fdb_dump		= dsa_slave_fdb_dump,
1130 	.ndo_do_ioctl		= dsa_slave_ioctl,
1131 	.ndo_get_iflink		= dsa_slave_get_iflink,
1132 #ifdef CONFIG_NET_POLL_CONTROLLER
1133 	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
1134 	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
1135 	.ndo_poll_controller	= dsa_slave_poll_controller,
1136 #endif
1137 	.ndo_get_phys_port_name	= dsa_slave_get_phys_port_name,
1138 	.ndo_setup_tc		= dsa_slave_setup_tc,
1139 	.ndo_get_stats64	= dsa_slave_get_stats64,
1140 	.ndo_get_port_parent_id	= dsa_slave_get_port_parent_id,
1141 	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
1142 	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
1143 	.ndo_get_devlink_port	= dsa_slave_get_devlink_port,
1144 };
1145 
1146 static struct device_type dsa_type = {
1147 	.name	= "dsa",
1148 };
1149 
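/* The phylink_mac_ops below are thin shims that forward each operation to
 * the corresponding ds->ops->phylink_* hook when the switch driver provides
 * one. For mac_link_up/mac_link_down, drivers that only implement the older
 * adjust_link() callback are still supported as a fallback.
 */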
1150 static void dsa_slave_phylink_validate(struct net_device *dev,
1151 				       unsigned long *supported,
1152 				       struct phylink_link_state *state)
1153 {
1154 	struct dsa_port *dp = dsa_slave_to_port(dev);
1155 	struct dsa_switch *ds = dp->ds;
1156 
1157 	if (!ds->ops->phylink_validate)
1158 		return;
1159 
1160 	ds->ops->phylink_validate(ds, dp->index, supported, state);
1161 }
1162 
1163 static int dsa_slave_phylink_mac_link_state(struct net_device *dev,
1164 					    struct phylink_link_state *state)
1165 {
1166 	struct dsa_port *dp = dsa_slave_to_port(dev);
1167 	struct dsa_switch *ds = dp->ds;
1168 
1169 	/* Only called for SGMII and 802.3z */
1170 	if (!ds->ops->phylink_mac_link_state)
1171 		return -EOPNOTSUPP;
1172 
1173 	return ds->ops->phylink_mac_link_state(ds, dp->index, state);
1174 }
1175 
1176 static void dsa_slave_phylink_mac_config(struct net_device *dev,
1177 					 unsigned int mode,
1178 					 const struct phylink_link_state *state)
1179 {
1180 	struct dsa_port *dp = dsa_slave_to_port(dev);
1181 	struct dsa_switch *ds = dp->ds;
1182 
1183 	if (!ds->ops->phylink_mac_config)
1184 		return;
1185 
1186 	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
1187 }
1188 
1189 static void dsa_slave_phylink_mac_an_restart(struct net_device *dev)
1190 {
1191 	struct dsa_port *dp = dsa_slave_to_port(dev);
1192 	struct dsa_switch *ds = dp->ds;
1193 
1194 	if (!ds->ops->phylink_mac_an_restart)
1195 		return;
1196 
1197 	ds->ops->phylink_mac_an_restart(ds, dp->index);
1198 }
1199 
1200 static void dsa_slave_phylink_mac_link_down(struct net_device *dev,
1201 					    unsigned int mode,
1202 					    phy_interface_t interface)
1203 {
1204 	struct dsa_port *dp = dsa_slave_to_port(dev);
1205 	struct dsa_switch *ds = dp->ds;
1206 
1207 	if (!ds->ops->phylink_mac_link_down) {
1208 		if (ds->ops->adjust_link && dev->phydev)
1209 			ds->ops->adjust_link(ds, dp->index, dev->phydev);
1210 		return;
1211 	}
1212 
1213 	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
1214 }
1215 
1216 static void dsa_slave_phylink_mac_link_up(struct net_device *dev,
1217 					  unsigned int mode,
1218 					  phy_interface_t interface,
1219 					  struct phy_device *phydev)
1220 {
1221 	struct dsa_port *dp = dsa_slave_to_port(dev);
1222 	struct dsa_switch *ds = dp->ds;
1223 
1224 	if (!ds->ops->phylink_mac_link_up) {
1225 		if (ds->ops->adjust_link && dev->phydev)
1226 			ds->ops->adjust_link(ds, dp->index, dev->phydev);
1227 		return;
1228 	}
1229 
1230 	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev);
1231 }
1232 
1233 static const struct phylink_mac_ops dsa_slave_phylink_mac_ops = {
1234 	.validate = dsa_slave_phylink_validate,
1235 	.mac_link_state = dsa_slave_phylink_mac_link_state,
1236 	.mac_config = dsa_slave_phylink_mac_config,
1237 	.mac_an_restart = dsa_slave_phylink_mac_an_restart,
1238 	.mac_link_down = dsa_slave_phylink_mac_link_down,
1239 	.mac_link_up = dsa_slave_phylink_mac_link_up,
1240 };
1241 
1242 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
1243 {
1244 	const struct dsa_port *dp = dsa_to_port(ds, port);
1245 
1246 	phylink_mac_change(dp->pl, up);
1247 }
1248 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
1249 
1250 static void dsa_slave_phylink_fixed_state(struct net_device *dev,
1251 					  struct phylink_link_state *state)
1252 {
1253 	struct dsa_port *dp = dsa_slave_to_port(dev);
1254 	struct dsa_switch *ds = dp->ds;
1255 
1256 	/* No need to check that this operation is valid; the callback would
1257 	 * not be called if it were not.
1258 	 */
1259 	ds->ops->phylink_fixed_state(ds, dp->index, state);
1260 }
1261 
1262 /* slave device setup *******************************************************/
1263 static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
1264 {
1265 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1266 	struct dsa_switch *ds = dp->ds;
1267 
1268 	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
1269 	if (!slave_dev->phydev) {
1270 		netdev_err(slave_dev, "no phy at %d\n", addr);
1271 		return -ENODEV;
1272 	}
1273 
1274 	return phylink_connect_phy(dp->pl, slave_dev->phydev);
1275 }
1276 
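/* Create the phylink instance for a slave port and connect it to its PHY.
 * The PHY mode comes from the port's device tree node (defaulting to
 * PHY_INTERFACE_MODE_NA), and a driver-provided phylink_fixed_state()
 * callback takes precedence over phylink's own fixed-link handling. If no
 * PHY or SFP is described in the device tree (-ENODEV), the port falls
 * back to the switch's internal MDIO bus, using the port index as the PHY
 * address.
 */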
1277 static int dsa_slave_phy_setup(struct net_device *slave_dev)
1278 {
1279 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1280 	struct device_node *port_dn = dp->dn;
1281 	struct dsa_switch *ds = dp->ds;
1282 	u32 phy_flags = 0;
1283 	int mode, ret;
1284 
1285 	mode = of_get_phy_mode(port_dn);
1286 	if (mode < 0)
1287 		mode = PHY_INTERFACE_MODE_NA;
1288 
1289 	dp->pl = phylink_create(slave_dev, of_fwnode_handle(port_dn), mode,
1290 				&dsa_slave_phylink_mac_ops);
1291 	if (IS_ERR(dp->pl)) {
1292 		netdev_err(slave_dev,
1293 			   "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
1294 		return PTR_ERR(dp->pl);
1295 	}
1296 
1297 	/* Register only if the switch provides such a callback, since this
1298 	 * callback takes precedence over polling the link GPIO in PHYLINK
1299 	 * (see phylink_get_fixed_state).
1300 	 */
1301 	if (ds->ops->phylink_fixed_state)
1302 		phylink_fixed_state_cb(dp->pl, dsa_slave_phylink_fixed_state);
1303 
1304 	if (ds->ops->get_phy_flags)
1305 		phy_flags = ds->ops->get_phy_flags(ds, dp->index);
1306 
1307 	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
1308 	if (ret == -ENODEV && ds->slave_mii_bus) {
1309 		/* We could not connect to a designated PHY or SFP, so try to
1310 		 * use the switch's internal MDIO bus instead
1311 		 */
1312 		ret = dsa_slave_phy_connect(slave_dev, dp->index);
1313 		if (ret) {
1314 			netdev_err(slave_dev,
1315 				   "failed to connect to port %d: %d\n",
1316 				   dp->index, ret);
1317 			phylink_destroy(dp->pl);
1318 			return ret;
1319 		}
1320 	}
1321 
1322 	return ret;
1323 }
1324 
1325 static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
1326 static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
1327 					    struct netdev_queue *txq,
1328 					    void *_unused)
1329 {
1330 	lockdep_set_class(&txq->_xmit_lock,
1331 			  &dsa_slave_netdev_xmit_lock_key);
1332 }
1333 
1334 int dsa_slave_suspend(struct net_device *slave_dev)
1335 {
1336 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1337 
1338 	if (!netif_running(slave_dev))
1339 		return 0;
1340 
1341 	netif_device_detach(slave_dev);
1342 
1343 	rtnl_lock();
1344 	phylink_stop(dp->pl);
1345 	rtnl_unlock();
1346 
1347 	return 0;
1348 }
1349 
1350 int dsa_slave_resume(struct net_device *slave_dev)
1351 {
1352 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1353 
1354 	if (!netif_running(slave_dev))
1355 		return 0;
1356 
1357 	netif_device_attach(slave_dev);
1358 
1359 	rtnl_lock();
1360 	phylink_start(dp->pl);
1361 	rtnl_unlock();
1362 
1363 	return 0;
1364 }
1365 
1366 static void dsa_slave_notify(struct net_device *dev, unsigned long val)
1367 {
1368 	struct net_device *master = dsa_slave_to_master(dev);
1369 	struct dsa_port *dp = dsa_slave_to_port(dev);
1370 	struct dsa_notifier_register_info rinfo = {
1371 		.switch_number = dp->ds->index,
1372 		.port_number = dp->index,
1373 		.master = master,
1374 		.info.dev = dev,
1375 	};
1376 
1377 	call_dsa_notifiers(val, dev, &rinfo.info);
1378 }
1379 
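/* Allocate and register the slave net_device for a port. The MAC address
 * is taken from port->mac when valid (typically a per-port address from
 * the device tree) and otherwise inherited from the master; the device
 * advertises NETIF_F_HW_TC and VLAN filtering and is marked IFF_NO_QUEUE
 * since actual queueing happens on the master. PHY/phylink setup and a
 * DSA_PORT_REGISTER notification happen before register_netdev().
 */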
1380 int dsa_slave_create(struct dsa_port *port)
1381 {
1382 	const struct dsa_port *cpu_dp = port->cpu_dp;
1383 	struct net_device *master = cpu_dp->master;
1384 	struct dsa_switch *ds = port->ds;
1385 	const char *name = port->name;
1386 	struct net_device *slave_dev;
1387 	struct dsa_slave_priv *p;
1388 	int ret;
1389 
1390 	if (!ds->num_tx_queues)
1391 		ds->num_tx_queues = 1;
1392 
1393 	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
1394 				     NET_NAME_UNKNOWN, ether_setup,
1395 				     ds->num_tx_queues, 1);
1396 	if (slave_dev == NULL)
1397 		return -ENOMEM;
1398 
1399 	slave_dev->features = master->vlan_features | NETIF_F_HW_TC |
1400 				NETIF_F_HW_VLAN_CTAG_FILTER;
1401 	slave_dev->hw_features |= NETIF_F_HW_TC;
1402 	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
1403 	if (port->mac && is_valid_ether_addr(port->mac))
1404 		ether_addr_copy(slave_dev->dev_addr, port->mac);
1405 	else
1406 		eth_hw_addr_inherit(slave_dev, master);
1407 	slave_dev->priv_flags |= IFF_NO_QUEUE;
1408 	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
1409 	slave_dev->min_mtu = 0;
1410 	slave_dev->max_mtu = ETH_MAX_MTU;
1411 	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
1412 
1413 	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
1414 				 NULL);
1415 
1416 	SET_NETDEV_DEV(slave_dev, port->ds->dev);
1417 	slave_dev->dev.of_node = port->dn;
1418 	slave_dev->vlan_features = master->vlan_features;
1419 
1420 	p = netdev_priv(slave_dev);
1421 	p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1422 	if (!p->stats64) {
1423 		free_netdev(slave_dev);
1424 		return -ENOMEM;
1425 	}
1426 	p->dp = port;
1427 	INIT_LIST_HEAD(&p->mall_tc_list);
1428 	p->xmit = cpu_dp->tag_ops->xmit;
1429 	port->slave = slave_dev;
1430 
1431 	netif_carrier_off(slave_dev);
1432 
1433 	ret = dsa_slave_phy_setup(slave_dev);
1434 	if (ret) {
1435 		netdev_err(master, "error %d setting up slave phy\n", ret);
1436 		goto out_free;
1437 	}
1438 
1439 	dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);
1440 
1441 	ret = register_netdev(slave_dev);
1442 	if (ret) {
1443 		netdev_err(master, "error %d registering interface %s\n",
1444 			   ret, slave_dev->name);
1445 		goto out_phy;
1446 	}
1447 
1448 	return 0;
1449 
1450 out_phy:
1451 	rtnl_lock();
1452 	phylink_disconnect_phy(p->dp->pl);
1453 	rtnl_unlock();
1454 	phylink_destroy(p->dp->pl);
1455 out_free:
1456 	free_percpu(p->stats64);
1457 	free_netdev(slave_dev);
1458 	port->slave = NULL;
1459 	return ret;
1460 }
1461 
1462 void dsa_slave_destroy(struct net_device *slave_dev)
1463 {
1464 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1465 	struct dsa_slave_priv *p = netdev_priv(slave_dev);
1466 
1467 	netif_carrier_off(slave_dev);
1468 	rtnl_lock();
1469 	phylink_disconnect_phy(dp->pl);
1470 	rtnl_unlock();
1471 
1472 	dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
1473 	unregister_netdev(slave_dev);
1474 	phylink_destroy(dp->pl);
1475 	free_percpu(p->stats64);
1476 	free_netdev(slave_dev);
1477 }
1478 
1479 static bool dsa_slave_dev_check(struct net_device *dev)
1480 {
1481 	return dev->netdev_ops == &dsa_slave_netdev_ops;
1482 }
1483 
1484 static int dsa_slave_changeupper(struct net_device *dev,
1485 				 struct netdev_notifier_changeupper_info *info)
1486 {
1487 	struct dsa_port *dp = dsa_slave_to_port(dev);
1488 	int err = NOTIFY_DONE;
1489 
1490 	if (netif_is_bridge_master(info->upper_dev)) {
1491 		if (info->linking) {
1492 			err = dsa_port_bridge_join(dp, info->upper_dev);
1493 			err = notifier_from_errno(err);
1494 		} else {
1495 			dsa_port_bridge_leave(dp, info->upper_dev);
1496 			err = NOTIFY_OK;
1497 		}
1498 	}
1499 
1500 	return err;
1501 }
1502 
1503 static int dsa_slave_upper_vlan_check(struct net_device *dev,
1504 				      struct netdev_notifier_changeupper_info *
1505 				      info)
1506 {
1507 	struct netlink_ext_ack *ext_ack;
1508 	struct net_device *slave;
1509 	struct dsa_port *dp;
1510 
1511 	ext_ack = netdev_notifier_info_to_extack(&info->info);
1512 
1513 	if (!is_vlan_dev(dev))
1514 		return NOTIFY_DONE;
1515 
1516 	slave = vlan_dev_real_dev(dev);
1517 	if (!dsa_slave_dev_check(slave))
1518 		return NOTIFY_DONE;
1519 
1520 	dp = dsa_slave_to_port(slave);
1521 	if (!dp->bridge_dev)
1522 		return NOTIFY_DONE;
1523 
1524 	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
1525 	if (br_vlan_enabled(dp->bridge_dev) &&
1526 	    netif_is_bridge_master(info->upper_dev) && info->linking) {
1527 		NL_SET_ERR_MSG_MOD(ext_ack,
1528 				   "Cannot enslave VLAN device into VLAN aware bridge");
1529 		return notifier_from_errno(-EINVAL);
1530 	}
1531 
1532 	return NOTIFY_DONE;
1533 }
1534 
1535 static int dsa_slave_netdevice_event(struct notifier_block *nb,
1536 				     unsigned long event, void *ptr)
1537 {
1538 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1539 
1540 	if (event == NETDEV_CHANGEUPPER) {
1541 		if (!dsa_slave_dev_check(dev))
1542 			return dsa_slave_upper_vlan_check(dev, ptr);
1543 
1544 		return dsa_slave_changeupper(dev, ptr);
1545 	}
1546 
1547 	return NOTIFY_DONE;
1548 }
1549 
1550 static int
1551 dsa_slave_switchdev_port_attr_set_event(struct net_device *netdev,
1552 		struct switchdev_notifier_port_attr_info *port_attr_info)
1553 {
1554 	int err;
1555 
1556 	err = dsa_slave_port_attr_set(netdev, port_attr_info->attr,
1557 				      port_attr_info->trans);
1558 
1559 	port_attr_info->handled = true;
1560 	return notifier_from_errno(err);
1561 }
1562 
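/* FDB add/del notifications arrive in atomic context (see the
 * rcu_read_lock() note on dsa_slave_switchdev_event() below), so the actual
 * hardware programming is deferred to a work item, which runs in process
 * context and can take rtnl_lock().
 */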
1563 struct dsa_switchdev_event_work {
1564 	struct work_struct work;
1565 	struct switchdev_notifier_fdb_info fdb_info;
1566 	struct net_device *dev;
1567 	unsigned long event;
1568 };
1569 
1570 static void dsa_slave_switchdev_event_work(struct work_struct *work)
1571 {
1572 	struct dsa_switchdev_event_work *switchdev_work =
1573 		container_of(work, struct dsa_switchdev_event_work, work);
1574 	struct net_device *dev = switchdev_work->dev;
1575 	struct switchdev_notifier_fdb_info *fdb_info;
1576 	struct dsa_port *dp = dsa_slave_to_port(dev);
1577 	int err;
1578 
1579 	rtnl_lock();
1580 	switch (switchdev_work->event) {
1581 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
1582 		fdb_info = &switchdev_work->fdb_info;
1583 		if (!fdb_info->added_by_user)
1584 			break;
1585 
1586 		err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid);
1587 		if (err) {
1588 			netdev_dbg(dev, "fdb add failed err=%d\n", err);
1589 			break;
1590 		}
1591 		fdb_info->offloaded = true;
1592 		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
1593 					 &fdb_info->info, NULL);
1594 		break;
1595 
1596 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
1597 		fdb_info = &switchdev_work->fdb_info;
1598 		if (!fdb_info->added_by_user)
1599 			break;
1600 
1601 		err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid);
1602 		if (err) {
1603 			netdev_dbg(dev, "fdb del failed err=%d\n", err);
1604 			dev_close(dev);
1605 		}
1606 		break;
1607 	}
1608 	rtnl_unlock();
1609 
1610 	kfree(switchdev_work->fdb_info.addr);
1611 	kfree(switchdev_work);
1612 	dev_put(dev);
1613 }
1614 
1615 static int
1616 dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work *
1617 				  switchdev_work,
1618 				  const struct switchdev_notifier_fdb_info *
1619 				  fdb_info)
1620 {
1621 	memcpy(&switchdev_work->fdb_info, fdb_info,
1622 	       sizeof(switchdev_work->fdb_info));
1623 	switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
1624 	if (!switchdev_work->fdb_info.addr)
1625 		return -ENOMEM;
1626 	ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
1627 			fdb_info->addr);
1628 	return 0;
1629 }
1630 
1631 /* Called under rcu_read_lock() */
1632 static int dsa_slave_switchdev_event(struct notifier_block *unused,
1633 				     unsigned long event, void *ptr)
1634 {
1635 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
1636 	struct dsa_switchdev_event_work *switchdev_work;
1637 
1638 	if (!dsa_slave_dev_check(dev))
1639 		return NOTIFY_DONE;
1640 
1641 	if (event == SWITCHDEV_PORT_ATTR_SET)
1642 		return dsa_slave_switchdev_port_attr_set_event(dev, ptr);
1643 
1644 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
1645 	if (!switchdev_work)
1646 		return NOTIFY_BAD;
1647 
1648 	INIT_WORK(&switchdev_work->work,
1649 		  dsa_slave_switchdev_event_work);
1650 	switchdev_work->dev = dev;
1651 	switchdev_work->event = event;
1652 
1653 	switch (event) {
1654 	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
1655 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
1656 		if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr))
1657 			goto err_fdb_work_init;
1658 		dev_hold(dev);
1659 		break;
1660 	default:
1661 		kfree(switchdev_work);
1662 		return NOTIFY_DONE;
1663 	}
1664 
1665 	dsa_schedule_work(&switchdev_work->work);
1666 	return NOTIFY_OK;
1667 
1668 err_fdb_work_init:
1669 	kfree(switchdev_work);
1670 	return NOTIFY_BAD;
1671 }
1672 
1673 static int
1674 dsa_slave_switchdev_port_obj_event(unsigned long event,
1675 			struct net_device *netdev,
1676 			struct switchdev_notifier_port_obj_info *port_obj_info)
1677 {
1678 	int err = -EOPNOTSUPP;
1679 
1680 	switch (event) {
1681 	case SWITCHDEV_PORT_OBJ_ADD:
1682 		err = dsa_slave_port_obj_add(netdev, port_obj_info->obj,
1683 					     port_obj_info->trans);
1684 		break;
1685 	case SWITCHDEV_PORT_OBJ_DEL:
1686 		err = dsa_slave_port_obj_del(netdev, port_obj_info->obj);
1687 		break;
1688 	}
1689 
1690 	port_obj_info->handled = true;
1691 	return notifier_from_errno(err);
1692 }
1693 
1694 static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
1695 					      unsigned long event, void *ptr)
1696 {
1697 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
1698 
1699 	if (!dsa_slave_dev_check(dev))
1700 		return NOTIFY_DONE;
1701 
1702 	switch (event) {
1703 	case SWITCHDEV_PORT_OBJ_ADD: /* fall through */
1704 	case SWITCHDEV_PORT_OBJ_DEL:
1705 		return dsa_slave_switchdev_port_obj_event(event, dev, ptr);
1706 	case SWITCHDEV_PORT_ATTR_SET:
1707 		return dsa_slave_switchdev_port_attr_set_event(dev, ptr);
1708 	}
1709 
1710 	return NOTIFY_DONE;
1711 }
1712 
1713 static struct notifier_block dsa_slave_nb __read_mostly = {
1714 	.notifier_call  = dsa_slave_netdevice_event,
1715 };
1716 
1717 static struct notifier_block dsa_slave_switchdev_notifier = {
1718 	.notifier_call = dsa_slave_switchdev_event,
1719 };
1720 
1721 static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
1722 	.notifier_call = dsa_slave_switchdev_blocking_event,
1723 };
1724 
1725 int dsa_slave_register_notifier(void)
1726 {
1727 	struct notifier_block *nb;
1728 	int err;
1729 
1730 	err = register_netdevice_notifier(&dsa_slave_nb);
1731 	if (err)
1732 		return err;
1733 
1734 	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
1735 	if (err)
1736 		goto err_switchdev_nb;
1737 
1738 	nb = &dsa_slave_switchdev_blocking_notifier;
1739 	err = register_switchdev_blocking_notifier(nb);
1740 	if (err)
1741 		goto err_switchdev_blocking_nb;
1742 
1743 	return 0;
1744 
1745 err_switchdev_blocking_nb:
1746 	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
1747 err_switchdev_nb:
1748 	unregister_netdevice_notifier(&dsa_slave_nb);
1749 	return err;
1750 }
1751 
1752 void dsa_slave_unregister_notifier(void)
1753 {
1754 	struct notifier_block *nb;
1755 	int err;
1756 
1757 	nb = &dsa_slave_switchdev_blocking_notifier;
1758 	err = unregister_switchdev_blocking_notifier(nb);
1759 	if (err)
1760 		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
1761 
1762 	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
1763 	if (err)
1764 		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
1765 
1766 	err = unregister_netdevice_notifier(&dsa_slave_nb);
1767 	if (err)
1768 		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
1769 }
1770