xref: /openbmc/linux/net/dsa/slave.c (revision d32f834cd6873d9a5ed18ad028700f60d1688cf3)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/dsa/slave.c - Slave device handling
4  * Copyright (c) 2008-2009 Marvell Semiconductor
5  */
6 
7 #include <linux/list.h>
8 #include <linux/etherdevice.h>
9 #include <linux/netdevice.h>
10 #include <linux/phy.h>
11 #include <linux/phy_fixed.h>
12 #include <linux/phylink.h>
13 #include <linux/of_net.h>
14 #include <linux/of_mdio.h>
15 #include <linux/mdio.h>
16 #include <net/rtnetlink.h>
17 #include <net/pkt_cls.h>
18 #include <net/tc_act/tc_mirred.h>
19 #include <linux/if_bridge.h>
20 #include <linux/netpoll.h>
21 #include <linux/ptp_classify.h>
22 
23 #include "dsa_priv.h"
24 
25 /* slave mii_bus handling ***************************************************/
26 static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
27 {
28 	struct dsa_switch *ds = bus->priv;
29 
30 	if (ds->phys_mii_mask & (1 << addr))
31 		return ds->ops->phy_read(ds, addr, reg);
32 
33 	return 0xffff;
34 }
35 
36 static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
37 {
38 	struct dsa_switch *ds = bus->priv;
39 
40 	if (ds->phys_mii_mask & (1 << addr))
41 		return ds->ops->phy_write(ds, addr, reg, val);
42 
43 	return 0;
44 }
45 
/* Initialize the per-switch slave mii_bus so that internal PHYs can be
 * reached through the switch driver's phy_read/phy_write ops.  Only
 * addresses present in ds->phys_mii_mask are scanned (phy_mask holds the
 * complement).  Registration of the bus is done by the caller.
 */
void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	/* Bus id must be unique: derive it from tree and switch indices */
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}
57 
58 
59 /* slave device handling ****************************************************/
60 static int dsa_slave_get_iflink(const struct net_device *dev)
61 {
62 	return dsa_slave_to_master(dev)->ifindex;
63 }
64 
/* ndo_open: bring up a DSA slave interface.
 *
 * The slave transmits and receives through the master, so the master must
 * already be up, must accept frames for the slave's MAC address, and must
 * mirror the slave's allmulti/promisc state.  Finally the switch port
 * itself is enabled.  On failure, everything is unwound in reverse order.
 */
static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (!(master->flags & IFF_UP))
		return -ENETDOWN;

	/* If the slave's MAC differs from the master's, the master needs a
	 * secondary unicast filter entry to receive the slave's traffic.
	 */
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto out;
	}

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(master, 1);
		if (err < 0)
			goto del_unicast;
	}
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(master, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto clear_promisc;

	return 0;

clear_promisc:
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
out:
	return err;
}
109 
/* ndo_stop: tear down a DSA slave interface, undoing everything that
 * dsa_slave_open() and dsa_slave_set_rx_mode() pushed to the master.
 */
static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dsa_port_disable_rt(dp);

	/* Drop the address lists and flag counts propagated to the master */
	dev_mc_unsync(master, dev);
	dev_uc_unsync(master, dev);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	return 0;
}
129 
130 static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
131 {
132 	struct net_device *master = dsa_slave_to_master(dev);
133 	if (dev->flags & IFF_UP) {
134 		if (change & IFF_ALLMULTI)
135 			dev_set_allmulti(master,
136 					 dev->flags & IFF_ALLMULTI ? 1 : -1);
137 		if (change & IFF_PROMISC)
138 			dev_set_promiscuity(master,
139 					    dev->flags & IFF_PROMISC ? 1 : -1);
140 	}
141 }
142 
/* ndo_set_rx_mode: mirror the slave's unicast and multicast address
 * lists onto the master so its hardware filters pass our traffic up.
 */
static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);

	dev_mc_sync(master, dev);
	dev_uc_sync(master, dev);
}
150 
/* ndo_set_mac_address: change the slave's MAC address, keeping the
 * master's unicast filter in sync when the interface is up.
 */
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* While down the master carries no filter entry for us; just record
	 * the address and let dsa_slave_open() program the master later.
	 */
	if (!(dev->flags & IFF_UP))
		goto out;

	/* Add the new address before deleting the old one so reception is
	 * never interrupted; skip entries equal to the master's own MAC.
	 */
	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}
177 
/* State threaded through dsa_slave_port_fdb_do_dump() while walking a
 * port's FDB entries for an ndo_fdb_dump request.
 */
struct dsa_slave_dump_ctx {
	struct net_device *dev;		/* slave whose FDB is being dumped */
	struct sk_buff *skb;		/* netlink skb being filled */
	struct netlink_callback *cb;	/* dump callback (seq, portid, args) */
	int idx;			/* entries visited so far, for resume */
};
184 
/* Per-entry callback for dsa_port_fdb_dump(): emit one RTM_NEWNEIGH
 * message for an FDB entry.  Entries before cb->args[2] were delivered by
 * a previous dump pass and are skipped (but still counted, so idx stays
 * comparable across passes).  Returns -EMSGSIZE when the skb is full.
 */
static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	/* Static entries don't age; dynamic ones are reachable */
	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	/* VID 0 means "no VLAN": omit the attribute entirely */
	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}
228 
/* ndo_fdb_dump: walk the hardware FDB of this port via the switch driver
 * and emit one netlink message per entry.  *idx is updated so a partial
 * dump can be resumed on the next call.
 */
static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}
248 
/* ndo_do_ioctl: hardware timestamping ioctls go to the switch driver when
 * it implements them; everything else is handled by phylink as a MII
 * ioctl.
 */
static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}
269 
/* switchdev attribute setter: dispatch bridge port attributes (STP state,
 * VLAN filtering, ageing time, bridge flags, multicast router) to the
 * corresponding dsa_port helper.  Attributes targeting a netdev this port
 * does not offload are declined with -EOPNOTSUPP.
 */
static int dsa_slave_port_attr_set(struct net_device *dev,
				   const struct switchdev_attr *attr)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	if (!dsa_port_offloads_netdev(dp, attr->orig_dev))
		return -EOPNOTSUPP;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		ret = dsa_port_set_state(dp, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		/* Multicast router state applies to the CPU port */
		ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
305 
/* Must be called under rcu_read_lock() */
/* Return -EBUSY if any 802.1Q upper of @slave already uses the VID that
 * the bridge VLAN @vlan wants to install; 0 otherwise.
 */
static int
dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
		u16 vid;

		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		if (vid == vlan->vid)
			return -EBUSY;
	}

	return 0;
}
327 
/* switchdev handler for SWITCHDEV_OBJ_ID_PORT_VLAN addition: program the
 * VLAN on this port and on its dedicated CPU port, then mirror it into
 * the master's VLAN filter so tagged traffic is not dropped there.
 */
static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct netlink_ext_ack *extack)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan;
	int err;

	if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	/* Local copy: the PVID flag is cleared below for the CPU port */
	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
	 * the same VID.
	 */
	if (br_vlan_enabled(dp->bridge_dev)) {
		rcu_read_lock();
		err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan);
		rcu_read_unlock();
		if (err)
			return err;
	}

	err = dsa_port_vlan_add(dp, &vlan);
	if (err)
		return err;

	/* We need the dedicated CPU port to be a member of the VLAN as well.
	 * Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	err = dsa_port_vlan_add(dp->cpu_dp, &vlan);
	if (err)
		return err;

	return vlan_vid_add(master, htons(ETH_P_8021Q), vlan.vid);
}
374 
/* switchdev object addition: route MDB and VLAN objects to their
 * respective handlers.  Host MDB entries are installed on the CPU port.
 */
static int dsa_slave_port_obj_add(struct net_device *dev,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
			return -EOPNOTSUPP;
		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB add,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_slave_vlan_add(dev, obj, extack);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
404 
/* switchdev handler for SWITCHDEV_OBJ_ID_PORT_VLAN deletion: remove the
 * VLAN from this port and from the master's filter, but deliberately not
 * from the shared CPU port.
 */
static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Do not deprogram the CPU port as it may be shared with other user
	 * ports which can be members of this VLAN as well.
	 */
	err = dsa_port_vlan_del(dp, vlan);
	if (err)
		return err;

	vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid);

	return 0;
}
432 
/* switchdev object deletion: mirror of dsa_slave_port_obj_add() for MDB
 * and VLAN objects.
 */
static int dsa_slave_port_obj_del(struct net_device *dev,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
			return -EOPNOTSUPP;
		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB delete,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_slave_vlan_del(dev, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
461 
/* ndo_get_port_parent_id: identify the switch tree this port belongs to
 * by its tree index, for legacy (non-devlink) switches only.
 */
static int dsa_slave_get_port_parent_id(struct net_device *dev,
					struct netdev_phys_item_id *ppid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;

	/* For non-legacy ports, devlink is used and it takes
	 * care of the name generation. This ndo implementation
	 * should be removed with legacy support.
	 * NOTE(review): "name generation" reads like it was copied from
	 * get_phys_port_name; here devlink provides the parent id instead.
	 */
	if (dp->ds->devlink)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(dst->index);
	memcpy(&ppid->id, &dst->index, ppid->id_len);

	return 0;
}
481 
/* Transmit a (tagged) skb through the slave's netpoll instance.  Callers
 * only reach this path when netpoll_tx_running() is true, which cannot
 * happen with CONFIG_NET_POLL_CONTROLLER disabled - hence the BUG().
 */
static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}
494 
/* Identify PTP packets on transmit and hand a clone to the switch driver
 * for hardware timestamping.  If the driver accepts the clone (returns
 * true from port_txtstamp), ownership transfers to it and a reference is
 * kept in DSA_SKB_CB; otherwise the clone is freed here.
 */
static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	struct sk_buff *clone;
	unsigned int type;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return;

	if (!ds->ops->port_txtstamp)
		return;

	clone = skb_clone_sk(skb);
	if (!clone)
		return;

	if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type)) {
		/* Driver took the clone; remember it on the original skb */
		DSA_SKB_CB(skb)->clone = clone;
		return;
	}

	kfree_skb(clone);
}
520 
/* Hand an already-tagged skb to the master interface for transmission.
 * Exported for use by DSA taggers.
 */
netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKB for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
538 
/* Ensure the skb has enough headroom/tailroom for the tagging protocol
 * (and is not cloned) before the tagger writes into it.  Returns 0 on
 * success or a negative errno from pskb_expand_head().
 */
static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = dev->needed_headroom;
	int needed_tailroom = dev->needed_tailroom;

	/* For tail taggers, we need to pad short frames ourselves, to ensure
	 * that the tail tag does not fail at its role of being at the end of
	 * the packet, once the master interface pads the frame. Account for
	 * that pad length here, and pad later.
	 */
	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
		needed_tailroom += ETH_ZLEN - skb->len;
	/* skb_headroom() returns unsigned int... */
	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);

	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
		/* No reallocation needed, yay! */
		return 0;

	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
				GFP_ATOMIC);
}
562 
/* ndo_start_xmit: count the packet, take a PTP timestamp clone if needed,
 * make room for the tag, pad tail-tagged frames, then let the tagging
 * protocol rewrite the skb and enqueue it on the master.
 */
static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	DSA_SKB_CB(skb)->clone = NULL;

	/* Identify PTP protocol packets, clone them, and pass them to the
	 * switch driver
	 */
	dsa_skb_tx_timestamp(p, skb);

	if (dsa_realloc_skb(skb, dev)) {
		/* Out of memory: drop the packet rather than stall */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * dsa_realloc_skb(), which has also ensured that padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}
599 
600 /* ethtool operations *******************************************************/
601 
/* ethtool get_drvinfo: all DSA slaves report the generic "dsa" driver;
 * there is no per-port firmware or bus information to expose.
 */
static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
609 
610 static int dsa_slave_get_regs_len(struct net_device *dev)
611 {
612 	struct dsa_port *dp = dsa_slave_to_port(dev);
613 	struct dsa_switch *ds = dp->ds;
614 
615 	if (ds->ops->get_regs_len)
616 		return ds->ops->get_regs_len(ds, dp->index);
617 
618 	return -EOPNOTSUPP;
619 }
620 
621 static void
622 dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
623 {
624 	struct dsa_port *dp = dsa_slave_to_port(dev);
625 	struct dsa_switch *ds = dp->ds;
626 
627 	if (ds->ops->get_regs)
628 		ds->ops->get_regs(ds, dp->index, regs, _p);
629 }
630 
631 static int dsa_slave_nway_reset(struct net_device *dev)
632 {
633 	struct dsa_port *dp = dsa_slave_to_port(dev);
634 
635 	return phylink_ethtool_nway_reset(dp->pl);
636 }
637 
/* ethtool get_eeprom_len: a length from platform chip data takes
 * precedence over the switch driver's op; 0 means no EEPROM.
 */
static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}
651 
652 static int dsa_slave_get_eeprom(struct net_device *dev,
653 				struct ethtool_eeprom *eeprom, u8 *data)
654 {
655 	struct dsa_port *dp = dsa_slave_to_port(dev);
656 	struct dsa_switch *ds = dp->ds;
657 
658 	if (ds->ops->get_eeprom)
659 		return ds->ops->get_eeprom(ds, eeprom, data);
660 
661 	return -EOPNOTSUPP;
662 }
663 
664 static int dsa_slave_set_eeprom(struct net_device *dev,
665 				struct ethtool_eeprom *eeprom, u8 *data)
666 {
667 	struct dsa_port *dp = dsa_slave_to_port(dev);
668 	struct dsa_switch *ds = dp->ds;
669 
670 	if (ds->ops->set_eeprom)
671 		return ds->ops->set_eeprom(ds, eeprom, data);
672 
673 	return -EOPNOTSUPP;
674 }
675 
676 static void dsa_slave_get_strings(struct net_device *dev,
677 				  uint32_t stringset, uint8_t *data)
678 {
679 	struct dsa_port *dp = dsa_slave_to_port(dev);
680 	struct dsa_switch *ds = dp->ds;
681 
682 	if (stringset == ETH_SS_STATS) {
683 		int len = ETH_GSTRING_LEN;
684 
685 		strncpy(data, "tx_packets", len);
686 		strncpy(data + len, "tx_bytes", len);
687 		strncpy(data + 2 * len, "rx_packets", len);
688 		strncpy(data + 3 * len, "rx_bytes", len);
689 		if (ds->ops->get_strings)
690 			ds->ops->get_strings(ds, dp->index, stringset,
691 					     data + 4 * len);
692 	}
693 }
694 
/* ethtool get_ethtool_stats: sum the per-CPU software counters into the
 * first four slots, then append the switch driver's hardware counters.
 * Slot order matches dsa_slave_get_strings().
 */
static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(dev->tstats, i);
		/* Retry loop: re-read if a writer updated the counters
		 * concurrently (u64_stats seqcount protocol).
		 */
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}
724 
725 static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
726 {
727 	struct dsa_port *dp = dsa_slave_to_port(dev);
728 	struct dsa_switch *ds = dp->ds;
729 
730 	if (sset == ETH_SS_STATS) {
731 		int count;
732 
733 		count = 4;
734 		if (ds->ops->get_sset_count)
735 			count += ds->ops->get_sset_count(ds, dp->index, sset);
736 
737 		return count;
738 	}
739 
740 	return -EOPNOTSUPP;
741 }
742 
/* ethtool get_wol: combine phylink's (PHY-level) Wake-on-LAN state with
 * whatever the switch driver reports for this port.
 */
static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}
753 
/* ethtool set_wol: configure Wake-on-LAN at the PHY via phylink, then in
 * the switch driver.  -EOPNOTSUPP when the driver has no set_wol op
 * (the phylink call is best-effort and returns no status here).
 */
static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}
767 
/* ethtool set_eee: program Energy-Efficient Ethernet on the MAC first
 * (via the switch driver), then on the PHY (via phylink).
 */
static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}
787 
/* ethtool get_eee: query Energy-Efficient Ethernet state from the MAC
 * (switch driver) and then the PHY (phylink), mirroring set_eee.
 */
static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}
807 
808 static int dsa_slave_get_link_ksettings(struct net_device *dev,
809 					struct ethtool_link_ksettings *cmd)
810 {
811 	struct dsa_port *dp = dsa_slave_to_port(dev);
812 
813 	return phylink_ethtool_ksettings_get(dp->pl, cmd);
814 }
815 
816 static int dsa_slave_set_link_ksettings(struct net_device *dev,
817 					const struct ethtool_link_ksettings *cmd)
818 {
819 	struct dsa_port *dp = dsa_slave_to_port(dev);
820 
821 	return phylink_ethtool_ksettings_set(dp->pl, cmd);
822 }
823 
824 static void dsa_slave_get_pauseparam(struct net_device *dev,
825 				     struct ethtool_pauseparam *pause)
826 {
827 	struct dsa_port *dp = dsa_slave_to_port(dev);
828 
829 	phylink_ethtool_get_pauseparam(dp->pl, pause);
830 }
831 
832 static int dsa_slave_set_pauseparam(struct net_device *dev,
833 				    struct ethtool_pauseparam *pause)
834 {
835 	struct dsa_port *dp = dsa_slave_to_port(dev);
836 
837 	return phylink_ethtool_set_pauseparam(dp->pl, pause);
838 }
839 
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_netpoll_setup: attach a netpoll instance targeting the master, so
 * netconsole and friends can transmit through the slave (see
 * dsa_slave_netpoll_send_skb()).
 */
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

/* ndo_netpoll_cleanup: detach and free the slave's netpoll instance */
static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	/* Clear the pointer before freeing so the xmit path stops using it */
	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

/* ndo_poll_controller: nothing to poll; RX happens via the master */
static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif
881 
/* ndo_get_phys_port_name: "p<index>" naming for legacy (non-devlink)
 * switches; devlink-backed ports generate their own names.
 */
static int dsa_slave_get_phys_port_name(struct net_device *dev,
					char *name, size_t len)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	/* For non-legacy ports, devlink is used and it takes
	 * care of the name generation. This ndo implementation
	 * should be removed with legacy support.
	 */
	if (dp->ds->devlink)
		return -EOPNOTSUPP;

	/* snprintf() >= len means the name was truncated */
	if (snprintf(name, len, "p%d", dp->index) >= len)
		return -EINVAL;

	return 0;
}
899 
900 static struct dsa_mall_tc_entry *
901 dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
902 {
903 	struct dsa_slave_priv *p = netdev_priv(dev);
904 	struct dsa_mall_tc_entry *mall_tc_entry;
905 
906 	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
907 		if (mall_tc_entry->cookie == cookie)
908 			return mall_tc_entry;
909 
910 	return NULL;
911 }
912 
/* Offload a matchall mirred (port mirroring) rule: the destination must
 * itself be a DSA slave on this switch tree.  The caller has already
 * verified there is exactly one action, so entries[0] is safe to use.
 * On success the entry is tracked in p->mall_tc_list for later deletion.
 */
static int
dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_mirror_tc_entry *mirror;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err;

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	/* Mirroring to a non-DSA device cannot be done in hardware */
	if (!dsa_slave_dev_check(act->dev))
		return -EOPNOTSUPP;

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
	mirror = &mall_tc_entry->mirror;

	to_dp = dsa_slave_to_port(act->dev);

	mirror->to_local_port = to_dp->index;
	mirror->ingress = ingress;

	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}
965 
/* Offload a matchall police (port policer) rule.  Only one policer per
 * port and only on the ingress qdisc is supported.  On success the entry
 * is tracked in p->mall_tc_list for later deletion.
 */
static int
dsa_slave_add_cls_matchall_police(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_policer_tc_entry *policer;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	int err;

	if (!ds->ops->port_policer_add) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Policing offload not implemented");
		return -EOPNOTSUPP;
	}

	if (!ingress) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only supported on ingress qdisc");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	/* The hardware has a single policer per port */
	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one port policer allowed");
			return -EEXIST;
		}
	}

	/* Caller verified there is exactly one action */
	act = &cls->rule->action.entries[0];

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
	policer = &mall_tc_entry->policer;
	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
	policer->burst = act->police.burst;

	err = ds->ops->port_policer_add(ds, dp->index, policer);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}
1026 
1027 static int dsa_slave_add_cls_matchall(struct net_device *dev,
1028 				      struct tc_cls_matchall_offload *cls,
1029 				      bool ingress)
1030 {
1031 	int err = -EOPNOTSUPP;
1032 
1033 	if (cls->common.protocol == htons(ETH_P_ALL) &&
1034 	    flow_offload_has_one_action(&cls->rule->action) &&
1035 	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
1036 		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
1037 	else if (flow_offload_has_one_action(&cls->rule->action) &&
1038 		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
1039 		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);
1040 
1041 	return err;
1042 }
1043 
/* Remove a previously offloaded matchall rule identified by its cookie,
 * undoing the hardware mirror/policer and freeing the tracking entry.
 * Unknown cookies are ignored (nothing was offloaded for them).
 */
static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		if (ds->ops->port_mirror_del)
			ds->ops->port_mirror_del(ds, dp->index,
						 &mall_tc_entry->mirror);
		break;
	case DSA_PORT_MALL_POLICER:
		if (ds->ops->port_policer_del)
			ds->ops->port_policer_del(ds, dp->index);
		break;
	default:
		/* Entry types are set by this file; anything else is a bug */
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}
1073 
/* tc matchall entry point: route REPLACE/DESTROY commands to the add and
 * delete helpers.  Only chain 0 is offloadable.
 */
static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
1091 
1092 static int dsa_slave_add_cls_flower(struct net_device *dev,
1093 				    struct flow_cls_offload *cls,
1094 				    bool ingress)
1095 {
1096 	struct dsa_port *dp = dsa_slave_to_port(dev);
1097 	struct dsa_switch *ds = dp->ds;
1098 	int port = dp->index;
1099 
1100 	if (!ds->ops->cls_flower_add)
1101 		return -EOPNOTSUPP;
1102 
1103 	return ds->ops->cls_flower_add(ds, port, cls, ingress);
1104 }
1105 
1106 static int dsa_slave_del_cls_flower(struct net_device *dev,
1107 				    struct flow_cls_offload *cls,
1108 				    bool ingress)
1109 {
1110 	struct dsa_port *dp = dsa_slave_to_port(dev);
1111 	struct dsa_switch *ds = dp->ds;
1112 	int port = dp->index;
1113 
1114 	if (!ds->ops->cls_flower_del)
1115 		return -EOPNOTSUPP;
1116 
1117 	return ds->ops->cls_flower_del(ds, port, cls, ingress);
1118 }
1119 
1120 static int dsa_slave_stats_cls_flower(struct net_device *dev,
1121 				      struct flow_cls_offload *cls,
1122 				      bool ingress)
1123 {
1124 	struct dsa_port *dp = dsa_slave_to_port(dev);
1125 	struct dsa_switch *ds = dp->ds;
1126 	int port = dp->index;
1127 
1128 	if (!ds->ops->cls_flower_stats)
1129 		return -EOPNOTSUPP;
1130 
1131 	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
1132 }
1133 
1134 static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
1135 					 struct flow_cls_offload *cls,
1136 					 bool ingress)
1137 {
1138 	switch (cls->command) {
1139 	case FLOW_CLS_REPLACE:
1140 		return dsa_slave_add_cls_flower(dev, cls, ingress);
1141 	case FLOW_CLS_DESTROY:
1142 		return dsa_slave_del_cls_flower(dev, cls, ingress);
1143 	case FLOW_CLS_STATS:
1144 		return dsa_slave_stats_cls_flower(dev, cls, ingress);
1145 	default:
1146 		return -EOPNOTSUPP;
1147 	}
1148 }
1149 
1150 static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1151 				       void *cb_priv, bool ingress)
1152 {
1153 	struct net_device *dev = cb_priv;
1154 
1155 	if (!tc_can_offload(dev))
1156 		return -EOPNOTSUPP;
1157 
1158 	switch (type) {
1159 	case TC_SETUP_CLSMATCHALL:
1160 		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
1161 	case TC_SETUP_CLSFLOWER:
1162 		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
1163 	default:
1164 		return -EOPNOTSUPP;
1165 	}
1166 }
1167 
/* Flow block callback bound to the ingress (clsact) hook. */
static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}
1173 
/* Flow block callback bound to the egress (clsact) hook. */
static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}
1179 
1180 static LIST_HEAD(dsa_slave_block_cb_list);
1181 
1182 static int dsa_slave_setup_tc_block(struct net_device *dev,
1183 				    struct flow_block_offload *f)
1184 {
1185 	struct flow_block_cb *block_cb;
1186 	flow_setup_cb_t *cb;
1187 
1188 	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1189 		cb = dsa_slave_setup_tc_block_cb_ig;
1190 	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1191 		cb = dsa_slave_setup_tc_block_cb_eg;
1192 	else
1193 		return -EOPNOTSUPP;
1194 
1195 	f->driver_block_list = &dsa_slave_block_cb_list;
1196 
1197 	switch (f->command) {
1198 	case FLOW_BLOCK_BIND:
1199 		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
1200 			return -EBUSY;
1201 
1202 		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
1203 		if (IS_ERR(block_cb))
1204 			return PTR_ERR(block_cb);
1205 
1206 		flow_block_cb_add(block_cb, f);
1207 		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
1208 		return 0;
1209 	case FLOW_BLOCK_UNBIND:
1210 		block_cb = flow_block_cb_lookup(f->block, cb, dev);
1211 		if (!block_cb)
1212 			return -ENOENT;
1213 
1214 		flow_block_cb_remove(block_cb, f);
1215 		list_del(&block_cb->driver_list);
1216 		return 0;
1217 	default:
1218 		return -EOPNOTSUPP;
1219 	}
1220 }
1221 
1222 static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
1223 			      void *type_data)
1224 {
1225 	struct dsa_port *dp = dsa_slave_to_port(dev);
1226 	struct dsa_switch *ds = dp->ds;
1227 
1228 	if (type == TC_SETUP_BLOCK)
1229 		return dsa_slave_setup_tc_block(dev, type_data);
1230 
1231 	if (!ds->ops->port_setup_tc)
1232 		return -EOPNOTSUPP;
1233 
1234 	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
1235 }
1236 
1237 static int dsa_slave_get_rxnfc(struct net_device *dev,
1238 			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
1239 {
1240 	struct dsa_port *dp = dsa_slave_to_port(dev);
1241 	struct dsa_switch *ds = dp->ds;
1242 
1243 	if (!ds->ops->get_rxnfc)
1244 		return -EOPNOTSUPP;
1245 
1246 	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
1247 }
1248 
1249 static int dsa_slave_set_rxnfc(struct net_device *dev,
1250 			       struct ethtool_rxnfc *nfc)
1251 {
1252 	struct dsa_port *dp = dsa_slave_to_port(dev);
1253 	struct dsa_switch *ds = dp->ds;
1254 
1255 	if (!ds->ops->set_rxnfc)
1256 		return -EOPNOTSUPP;
1257 
1258 	return ds->ops->set_rxnfc(ds, dp->index, nfc);
1259 }
1260 
1261 static int dsa_slave_get_ts_info(struct net_device *dev,
1262 				 struct ethtool_ts_info *ts)
1263 {
1264 	struct dsa_slave_priv *p = netdev_priv(dev);
1265 	struct dsa_switch *ds = p->dp->ds;
1266 
1267 	if (!ds->ops->get_ts_info)
1268 		return -EOPNOTSUPP;
1269 
1270 	return ds->ops->get_ts_info(ds, p->dp->index, ts);
1271 }
1272 
1273 static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
1274 				     u16 vid)
1275 {
1276 	struct net_device *master = dsa_slave_to_master(dev);
1277 	struct dsa_port *dp = dsa_slave_to_port(dev);
1278 	struct switchdev_obj_port_vlan vlan = {
1279 		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
1280 		.vid = vid,
1281 		/* This API only allows programming tagged, non-PVID VIDs */
1282 		.flags = 0,
1283 	};
1284 	int ret;
1285 
1286 	/* User port... */
1287 	ret = dsa_port_vlan_add(dp, &vlan);
1288 	if (ret)
1289 		return ret;
1290 
1291 	/* And CPU port... */
1292 	ret = dsa_port_vlan_add(dp->cpu_dp, &vlan);
1293 	if (ret)
1294 		return ret;
1295 
1296 	return vlan_vid_add(master, proto, vid);
1297 }
1298 
1299 static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
1300 				      u16 vid)
1301 {
1302 	struct net_device *master = dsa_slave_to_master(dev);
1303 	struct dsa_port *dp = dsa_slave_to_port(dev);
1304 	struct switchdev_obj_port_vlan vlan = {
1305 		.vid = vid,
1306 		/* This API only allows programming tagged, non-PVID VIDs */
1307 		.flags = 0,
1308 	};
1309 	int err;
1310 
1311 	/* Do not deprogram the CPU port as it may be shared with other user
1312 	 * ports which can be members of this VLAN as well.
1313 	 */
1314 	err = dsa_port_vlan_del(dp, &vlan);
1315 	if (err)
1316 		return err;
1317 
1318 	vlan_vid_del(master, proto, vid);
1319 
1320 	return 0;
1321 }
1322 
/* Bookkeeping entry used by dsa_bridge_mtu_normalization(): remembers a
 * port's previous MTU so a failed bulk MTU change can be rolled back.
 */
struct dsa_hw_port {
	struct list_head list;
	struct net_device *dev;
	int old_mtu;	/* MTU to restore on rollback */
};
1328 
1329 static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
1330 {
1331 	const struct dsa_hw_port *p;
1332 	int err;
1333 
1334 	list_for_each_entry(p, hw_port_list, list) {
1335 		if (p->dev->mtu == mtu)
1336 			continue;
1337 
1338 		err = dev_set_mtu(p->dev, mtu);
1339 		if (err)
1340 			goto rollback;
1341 	}
1342 
1343 	return 0;
1344 
1345 rollback:
1346 	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
1347 		if (p->dev->mtu == p->old_mtu)
1348 			continue;
1349 
1350 		if (dev_set_mtu(p->dev, p->old_mtu))
1351 			netdev_err(p->dev, "Failed to restore MTU\n");
1352 	}
1353 
1354 	return err;
1355 }
1356 
1357 static void dsa_hw_port_list_free(struct list_head *hw_port_list)
1358 {
1359 	struct dsa_hw_port *p, *n;
1360 
1361 	list_for_each_entry_safe(p, n, hw_port_list, list)
1362 		kfree(p);
1363 }
1364 
/* Make the hardware datapath to/from @dev limited to a common MTU */
static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
	struct list_head hw_port_list;
	struct dsa_switch_tree *dst;
	int min_mtu = ETH_MAX_MTU;
	struct dsa_port *other_dp;
	int err;

	/* Only relevant when the switch asked for ingress MTU enforcement
	 * and the port is actually part of a bridge.
	 */
	if (!dp->ds->mtu_enforcement_ingress)
		return;

	if (!dp->bridge_dev)
		return;

	INIT_LIST_HEAD(&hw_port_list);

	/* Populate the list of ports that are part of the same bridge
	 * as the newly added/modified port
	 */
	list_for_each_entry(dst, &dsa_tree_list, list) {
		list_for_each_entry(other_dp, &dst->ports, list) {
			struct dsa_hw_port *hw_port;
			struct net_device *slave;

			if (other_dp->type != DSA_PORT_TYPE_USER)
				continue;

			if (other_dp->bridge_dev != dp->bridge_dev)
				continue;

			if (!other_dp->ds->mtu_enforcement_ingress)
				continue;

			slave = other_dp->slave;

			/* Track the smallest current MTU as fallback target */
			if (min_mtu > slave->mtu)
				min_mtu = slave->mtu;

			/* On allocation failure, just stop collecting; the
			 * partial list is freed at "out" and ports keep
			 * their current MTU.
			 */
			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
			if (!hw_port)
				goto out;

			hw_port->dev = slave;
			hw_port->old_mtu = slave->mtu;

			list_add(&hw_port->list, &hw_port_list);
		}
	}

	/* Attempt to configure the entire hardware bridge to the newly added
	 * interface's MTU first, regardless of whether the intention of the
	 * user was to raise or lower it.
	 */
	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
	if (!err)
		goto out;

	/* Clearly that didn't work out so well, so just set the minimum MTU on
	 * all hardware bridge ports now. If this fails too, then all ports will
	 * still have their old MTU rolled back anyway.
	 */
	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);

out:
	dsa_hw_port_list_free(&hw_port_list);
}
1432 
/* Change the MTU of a DSA user port, keeping the master and CPU port MTUs
 * large enough for tagged frames of the new size. Returns 0 or a negative
 * errno; -EOPNOTSUPP if the switch driver cannot change per-port MTUs.
 */
static int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	struct dsa_port *cpu_dp;
	int port = p->dp->index;
	int largest_mtu = 0;
	int new_master_mtu;
	int old_master_mtu;
	int mtu_limit;
	int cpu_mtu;
	int err, i;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	/* Find the largest MTU requested across all user ports of this
	 * switch, as if the current request had already been applied.
	 */
	for (i = 0; i < ds->num_ports; i++) {
		int slave_mtu;

		if (!dsa_is_user_port(ds, i))
			continue;

		/* During probe, this function will be called for each slave
		 * device, while not all of them have been allocated. That's
		 * ok, it doesn't change what the maximum is, so ignore it.
		 */
		if (!dsa_to_port(ds, i)->slave)
			continue;

		/* Pretend that we already applied the setting, which we
		 * actually haven't (still haven't done all integrity checks)
		 */
		if (i == port)
			slave_mtu = new_mtu;
		else
			slave_mtu = dsa_to_port(ds, i)->slave->mtu;

		if (largest_mtu < slave_mtu)
			largest_mtu = slave_mtu;
	}

	cpu_dp = dsa_to_port(ds, port)->cpu_dp;

	/* The master must absorb the tagging overhead on top of the largest
	 * user port MTU; reject the change if it cannot.
	 */
	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
	old_master_mtu = master->mtu;
	new_master_mtu = largest_mtu + cpu_dp->tag_ops->overhead;
	if (new_master_mtu > mtu_limit)
		return -ERANGE;

	/* If the master MTU isn't over limit, there's no need to check the CPU
	 * MTU, since that surely isn't either.
	 */
	cpu_mtu = largest_mtu;

	/* Start applying stuff */
	if (new_master_mtu != old_master_mtu) {
		err = dev_set_mtu(master, new_master_mtu);
		if (err < 0)
			goto out_master_failed;

		/* We only need to propagate the MTU of the CPU port to
		 * upstream switches.
		 */
		err = dsa_port_mtu_change(cpu_dp, cpu_mtu, true);
		if (err)
			goto out_cpu_failed;
	}

	err = dsa_port_mtu_change(dp, new_mtu, false);
	if (err)
		goto out_port_failed;

	dev->mtu = new_mtu;

	/* Bridged ports may need to converge on a common MTU now */
	dsa_bridge_mtu_normalization(dp);

	return 0;

out_port_failed:
	/* Unwind in reverse order: CPU port first... */
	if (new_master_mtu != old_master_mtu)
		dsa_port_mtu_change(cpu_dp, old_master_mtu -
				    cpu_dp->tag_ops->overhead,
				    true);
out_cpu_failed:
	/* ...then the master's MTU */
	if (new_master_mtu != old_master_mtu)
		dev_set_mtu(master, old_master_mtu);
out_master_failed:
	return err;
}
1524 
/* ethtool entry points for DSA user ports; most of these proxy the request
 * to the underlying switch driver through dsa_switch_ops.
 */
static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo		= dsa_slave_get_drvinfo,
	.get_regs_len		= dsa_slave_get_regs_len,
	.get_regs		= dsa_slave_get_regs,
	.nway_reset		= dsa_slave_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= dsa_slave_get_eeprom_len,
	.get_eeprom		= dsa_slave_get_eeprom,
	.set_eeprom		= dsa_slave_set_eeprom,
	.get_strings		= dsa_slave_get_strings,
	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
	.get_sset_count		= dsa_slave_get_sset_count,
	.set_wol		= dsa_slave_set_wol,
	.get_wol		= dsa_slave_get_wol,
	.set_eee		= dsa_slave_set_eee,
	.get_eee		= dsa_slave_get_eee,
	.get_link_ksettings	= dsa_slave_get_link_ksettings,
	.set_link_ksettings	= dsa_slave_set_link_ksettings,
	.get_pauseparam		= dsa_slave_get_pauseparam,
	.set_pauseparam		= dsa_slave_set_pauseparam,
	.get_rxnfc		= dsa_slave_get_rxnfc,
	.set_rxnfc		= dsa_slave_set_rxnfc,
	.get_ts_info		= dsa_slave_get_ts_info,
};
1549 
1550 /* legacy way, bypassing the bridge *****************************************/
1551 static int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
1552 			      struct net_device *dev,
1553 			      const unsigned char *addr, u16 vid,
1554 			      u16 flags,
1555 			      struct netlink_ext_ack *extack)
1556 {
1557 	struct dsa_port *dp = dsa_slave_to_port(dev);
1558 
1559 	return dsa_port_fdb_add(dp, addr, vid);
1560 }
1561 
1562 static int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
1563 			      struct net_device *dev,
1564 			      const unsigned char *addr, u16 vid)
1565 {
1566 	struct dsa_port *dp = dsa_slave_to_port(dev);
1567 
1568 	return dsa_port_fdb_del(dp, addr, vid);
1569 }
1570 
1571 static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
1572 {
1573 	struct dsa_port *dp = dsa_slave_to_port(dev);
1574 
1575 	return dp->ds->devlink ? &dp->devlink_port : NULL;
1576 }
1577 
1578 static void dsa_slave_get_stats64(struct net_device *dev,
1579 				  struct rtnl_link_stats64 *s)
1580 {
1581 	struct dsa_port *dp = dsa_slave_to_port(dev);
1582 	struct dsa_switch *ds = dp->ds;
1583 
1584 	if (ds->ops->get_stats64)
1585 		ds->ops->get_stats64(ds, dp->index, s);
1586 	else
1587 		dev_get_tstats64(dev, s);
1588 }
1589 
1590 static const struct net_device_ops dsa_slave_netdev_ops = {
1591 	.ndo_open	 	= dsa_slave_open,
1592 	.ndo_stop		= dsa_slave_close,
1593 	.ndo_start_xmit		= dsa_slave_xmit,
1594 	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
1595 	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
1596 	.ndo_set_mac_address	= dsa_slave_set_mac_address,
1597 	.ndo_fdb_add		= dsa_legacy_fdb_add,
1598 	.ndo_fdb_del		= dsa_legacy_fdb_del,
1599 	.ndo_fdb_dump		= dsa_slave_fdb_dump,
1600 	.ndo_do_ioctl		= dsa_slave_ioctl,
1601 	.ndo_get_iflink		= dsa_slave_get_iflink,
1602 #ifdef CONFIG_NET_POLL_CONTROLLER
1603 	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
1604 	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
1605 	.ndo_poll_controller	= dsa_slave_poll_controller,
1606 #endif
1607 	.ndo_get_phys_port_name	= dsa_slave_get_phys_port_name,
1608 	.ndo_setup_tc		= dsa_slave_setup_tc,
1609 	.ndo_get_stats64	= dsa_slave_get_stats64,
1610 	.ndo_get_port_parent_id	= dsa_slave_get_port_parent_id,
1611 	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
1612 	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
1613 	.ndo_get_devlink_port	= dsa_slave_get_devlink_port,
1614 	.ndo_change_mtu		= dsa_slave_change_mtu,
1615 };
1616 
/* Device type shared by all DSA user ports (visible in sysfs). */
static struct device_type dsa_type = {
	.name	= "dsa",
};
1620 
1621 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
1622 {
1623 	const struct dsa_port *dp = dsa_to_port(ds, port);
1624 
1625 	if (dp->pl)
1626 		phylink_mac_change(dp->pl, up);
1627 }
1628 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
1629 
1630 static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
1631 					  struct phylink_link_state *state)
1632 {
1633 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1634 	struct dsa_switch *ds = dp->ds;
1635 
1636 	/* No need to check that this operation is valid, the callback would
1637 	 * not be called if it was not.
1638 	 */
1639 	ds->ops->phylink_fixed_state(ds, dp->index, state);
1640 }
1641 
1642 /* slave device setup *******************************************************/
1643 static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
1644 {
1645 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1646 	struct dsa_switch *ds = dp->ds;
1647 
1648 	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
1649 	if (!slave_dev->phydev) {
1650 		netdev_err(slave_dev, "no phy at %d\n", addr);
1651 		return -ENODEV;
1652 	}
1653 
1654 	return phylink_connect_phy(dp->pl, slave_dev->phydev);
1655 }
1656 
/* Create the phylink instance for a slave port and connect it to its PHY,
 * falling back to the switch-internal MDIO bus when no PHY or SFP is
 * described in the device tree. On failure the phylink instance is
 * destroyed before returning.
 */
static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	u32 phy_flags = 0;
	int ret;

	/* A missing phy-mode property is not fatal: let phylink decide */
	ret = of_get_phy_mode(port_dn, &mode);
	if (ret)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = &slave_dev->dev;
	dp->pl_config.type = PHYLINK_NETDEV;

	/* The get_fixed_state callback takes precedence over polling the
	 * link GPIO in PHYLINK (see phylink_get_fixed_state).  Only set
	 * this if the switch provides such a callback.
	 */
	if (ds->ops->phylink_fixed_state) {
		dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
		dp->pl_config.poll_fixed_state = true;
	}

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
				&dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		netdev_err(slave_dev,
			   "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
	if (ret == -ENODEV && ds->slave_mii_bus) {
		/* We could not connect to a designated PHY or SFP, so try to
		 * use the switch internal MDIO bus instead
		 */
		ret = dsa_slave_phy_connect(slave_dev, dp->index);
		if (ret) {
			netdev_err(slave_dev,
				   "failed to connect to port %d: %d\n",
				   dp->index, ret);
			/* Undo phylink_create() before bailing out */
			phylink_destroy(dp->pl);
			return ret;
		}
	}

	return ret;
}
1710 
1711 static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
1712 static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
1713 					    struct netdev_queue *txq,
1714 					    void *_unused)
1715 {
1716 	lockdep_set_class(&txq->_xmit_lock,
1717 			  &dsa_slave_netdev_xmit_lock_key);
1718 }
1719 
1720 int dsa_slave_suspend(struct net_device *slave_dev)
1721 {
1722 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1723 
1724 	if (!netif_running(slave_dev))
1725 		return 0;
1726 
1727 	netif_device_detach(slave_dev);
1728 
1729 	rtnl_lock();
1730 	phylink_stop(dp->pl);
1731 	rtnl_unlock();
1732 
1733 	return 0;
1734 }
1735 
1736 int dsa_slave_resume(struct net_device *slave_dev)
1737 {
1738 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1739 
1740 	if (!netif_running(slave_dev))
1741 		return 0;
1742 
1743 	netif_device_attach(slave_dev);
1744 
1745 	rtnl_lock();
1746 	phylink_start(dp->pl);
1747 	rtnl_unlock();
1748 
1749 	return 0;
1750 }
1751 
/* Allocate, configure and register the net_device backing a DSA user port.
 * On success @port->slave points at the new interface; on failure every
 * intermediate step is unwound and a negative errno is returned.
 */
int dsa_slave_create(struct dsa_port *port)
{
	const struct dsa_port *cpu_dp = port->cpu_dp;
	struct net_device *master = cpu_dp->master;
	struct dsa_switch *ds = port->ds;
	const char *name = port->name;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
				     NET_NAME_UNKNOWN, ether_setup,
				     ds->num_tx_queues, 1);
	if (slave_dev == NULL)
		return -ENOMEM;

	/* Only advertise VLAN filtering if the switch can offload it */
	slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
	if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
		slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	slave_dev->hw_features |= NETIF_F_HW_TC;
	slave_dev->features |= NETIF_F_LLTX;
	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
	if (!IS_ERR_OR_NULL(port->mac))
		ether_addr_copy(slave_dev->dev_addr, port->mac);
	else
		eth_hw_addr_inherit(slave_dev, master);
	slave_dev->priv_flags |= IFF_NO_QUEUE;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
	if (ds->ops->port_max_mtu)
		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
	/* Reserve room for the DSA tag wherever this tagger places it */
	if (cpu_dp->tag_ops->tail_tag)
		slave_dev->needed_tailroom = cpu_dp->tag_ops->overhead;
	else
		slave_dev->needed_headroom = cpu_dp->tag_ops->overhead;
	/* Try to save one extra realloc later in the TX path (in the master)
	 * by also inheriting the master's needed headroom and tailroom.
	 * The 8021q driver also does this.
	 */
	slave_dev->needed_headroom += master->needed_headroom;
	slave_dev->needed_tailroom += master->needed_tailroom;
	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
				 NULL);

	SET_NETDEV_DEV(slave_dev, port->ds->dev);
	slave_dev->dev.of_node = port->dn;
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!slave_dev->tstats) {
		free_netdev(slave_dev);
		return -ENOMEM;
	}

	ret = gro_cells_init(&p->gcells, slave_dev);
	if (ret)
		goto out_free;

	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	p->xmit = cpu_dp->tag_ops->xmit;
	port->slave = slave_dev;

	/* MTU setup needs RTNL; a failure here is non-fatal by design */
	rtnl_lock();
	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
	rtnl_unlock();
	if (ret && ret != -EOPNOTSUPP)
		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
			 ret, ETH_DATA_LEN, port->index);

	netif_carrier_off(slave_dev);

	ret = dsa_slave_phy_setup(slave_dev);
	if (ret) {
		netdev_err(slave_dev,
			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
			   ret, ds->dst->index, ds->index, port->index);
		goto out_gcells;
	}

	rtnl_lock();

	ret = register_netdevice(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		rtnl_unlock();
		goto out_phy;
	}

	/* Make the slave show up as a lower device of the master */
	ret = netdev_upper_dev_link(master, slave_dev, NULL);

	rtnl_unlock();

	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	unregister_netdev(slave_dev);
out_phy:
	rtnl_lock();
	phylink_disconnect_phy(p->dp->pl);
	rtnl_unlock();
	phylink_destroy(p->dp->pl);
out_gcells:
	gro_cells_destroy(&p->gcells);
out_free:
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
	port->slave = NULL;
	return ret;
}
1871 
/* Tear down a DSA user port interface: unlink it from the master,
 * unregister the netdev, then release phylink/GRO/statistics resources in
 * the reverse order of dsa_slave_create().
 */
void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct net_device *master = dsa_slave_to_master(slave_dev);
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);

	netif_carrier_off(slave_dev);
	rtnl_lock();
	netdev_upper_dev_unlink(master, slave_dev);
	unregister_netdevice(slave_dev);
	/* Must happen under RTNL and before phylink_destroy() */
	phylink_disconnect_phy(dp->pl);
	rtnl_unlock();

	phylink_destroy(dp->pl);
	gro_cells_destroy(&p->gcells);
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
}
1890 
1891 bool dsa_slave_dev_check(const struct net_device *dev)
1892 {
1893 	return dev->netdev_ops == &dsa_slave_netdev_ops;
1894 }
1895 EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
1896 
/* Handle NETDEV_CHANGEUPPER for a DSA user port: offload joining/leaving a
 * bridge or a LAG. Returns a notifier status code.
 */
static int dsa_slave_changeupper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err = NOTIFY_DONE;

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_bridge_join(dp, info->upper_dev);
			/* Joining a bridge may require MTU convergence */
			if (!err)
				dsa_bridge_mtu_normalization(dp);
			err = notifier_from_errno(err);
		} else {
			dsa_port_bridge_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (netif_is_lag_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_lag_join(dp, info->upper_dev,
						info->upper_info);
			/* Unoffloaded LAG still works in software; warn only */
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_MOD(info->info.extack,
						   "Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_lag_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}
1931 
/* A CHANGEUPPER on a LAG device affects every hardware-offloaded DSA port
 * enslaved in it; replay the event on each such lower device.
 */
static int
dsa_slave_lag_changeupper(struct net_device *dev,
			  struct netdev_notifier_changeupper_info *info)
{
	struct net_device *lower;
	struct list_head *iter;
	int err = NOTIFY_DONE;
	struct dsa_port *dp;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!dsa_slave_dev_check(lower))
			continue;

		dp = dsa_slave_to_port(lower);
		if (!dp->lag_dev)
			/* Software LAG */
			continue;

		err = dsa_slave_changeupper(lower, info);
		if (notifier_to_errno(err))
			break;
	}

	return err;
}
1957 
1958 static int
1959 dsa_prevent_bridging_8021q_upper(struct net_device *dev,
1960 				 struct netdev_notifier_changeupper_info *info)
1961 {
1962 	struct netlink_ext_ack *ext_ack;
1963 	struct net_device *slave;
1964 	struct dsa_port *dp;
1965 
1966 	ext_ack = netdev_notifier_info_to_extack(&info->info);
1967 
1968 	if (!is_vlan_dev(dev))
1969 		return NOTIFY_DONE;
1970 
1971 	slave = vlan_dev_real_dev(dev);
1972 	if (!dsa_slave_dev_check(slave))
1973 		return NOTIFY_DONE;
1974 
1975 	dp = dsa_slave_to_port(slave);
1976 	if (!dp->bridge_dev)
1977 		return NOTIFY_DONE;
1978 
1979 	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
1980 	if (br_vlan_enabled(dp->bridge_dev) &&
1981 	    netif_is_bridge_master(info->upper_dev) && info->linking) {
1982 		NL_SET_ERR_MSG_MOD(ext_ack,
1983 				   "Cannot enslave VLAN device into VLAN aware bridge");
1984 		return notifier_from_errno(-EINVAL);
1985 	}
1986 
1987 	return NOTIFY_DONE;
1988 }
1989 
1990 static int
1991 dsa_slave_check_8021q_upper(struct net_device *dev,
1992 			    struct netdev_notifier_changeupper_info *info)
1993 {
1994 	struct dsa_port *dp = dsa_slave_to_port(dev);
1995 	struct net_device *br = dp->bridge_dev;
1996 	struct bridge_vlan_info br_info;
1997 	struct netlink_ext_ack *extack;
1998 	int err = NOTIFY_DONE;
1999 	u16 vid;
2000 
2001 	if (!br || !br_vlan_enabled(br))
2002 		return NOTIFY_DONE;
2003 
2004 	extack = netdev_notifier_info_to_extack(&info->info);
2005 	vid = vlan_dev_vlan_id(info->upper_dev);
2006 
2007 	/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
2008 	 * device, respectively the VID is not found, returning
2009 	 * 0 means success, which is a failure for us here.
2010 	 */
2011 	err = br_vlan_get_info(br, vid, &br_info);
2012 	if (err == 0) {
2013 		NL_SET_ERR_MSG_MOD(extack,
2014 				   "This VLAN is already configured by the bridge");
2015 		return notifier_from_errno(-EBUSY);
2016 	}
2017 
2018 	return NOTIFY_DONE;
2019 }
2020 
/* Notifier for netdev topology events relevant to DSA: vets prospective
 * uppers of user ports, offloads bridge/LAG membership changes, and
 * propagates LAG lower-state updates to the switch driver.
 */
static int dsa_slave_netdevice_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_PRECHANGEUPPER: {
		struct netdev_notifier_changeupper_info *info = ptr;
		struct dsa_switch *ds;
		struct dsa_port *dp;
		int err;

		/* 8021q uppers of bridged DSA ports need vetting too */
		if (!dsa_slave_dev_check(dev))
			return dsa_prevent_bridging_8021q_upper(dev, ptr);

		dp = dsa_slave_to_port(dev);
		ds = dp->ds;

		/* Give the switch driver a chance to veto the new upper */
		if (ds->ops->port_prechangeupper) {
			err = ds->ops->port_prechangeupper(ds, dp->index, info);
			if (err)
				return notifier_from_errno(err);
		}

		if (is_vlan_dev(info->upper_dev))
			return dsa_slave_check_8021q_upper(dev, ptr);
		break;
	}
	case NETDEV_CHANGEUPPER:
		if (dsa_slave_dev_check(dev))
			return dsa_slave_changeupper(dev, ptr);

		if (netif_is_lag_master(dev))
			return dsa_slave_lag_changeupper(dev, ptr);

		break;
	case NETDEV_CHANGELOWERSTATE: {
		struct netdev_notifier_changelowerstate_info *info = ptr;
		struct dsa_port *dp;
		int err;

		if (!dsa_slave_dev_check(dev))
			break;

		dp = dsa_slave_to_port(dev);

		err = dsa_port_lag_change(dp, info->lower_state_info);
		return notifier_from_errno(err);
	}
	}

	return NOTIFY_DONE;
}
2074 
2075 static void
2076 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
2077 {
2078 	struct dsa_switch *ds = switchdev_work->ds;
2079 	struct switchdev_notifier_fdb_info info;
2080 	struct dsa_port *dp;
2081 
2082 	if (!dsa_is_user_port(ds, switchdev_work->port))
2083 		return;
2084 
2085 	info.addr = switchdev_work->addr;
2086 	info.vid = switchdev_work->vid;
2087 	info.offloaded = true;
2088 	dp = dsa_to_port(ds, switchdev_work->port);
2089 	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
2090 				 dp->slave, &info.info, NULL);
2091 }
2092 
/* Deferred worker that programs or removes an FDB entry in the switch,
 * outside of the atomic switchdev notifier context. Frees the work item
 * and drops the slave reference taken by dsa_slave_switchdev_event().
 */
static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	struct dsa_switch *ds = switchdev_work->ds;
	struct dsa_port *dp;
	int err;

	dp = dsa_to_port(ds, switchdev_work->port);

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		err = dsa_port_fdb_add(dp, switchdev_work->addr,
				       switchdev_work->vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, switchdev_work->addr,
				switchdev_work->vid, err);
			break;
		}
		/* Let the bridge know the entry is now offloaded */
		dsa_fdb_offload_notify(switchdev_work);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = dsa_port_fdb_del(dp, switchdev_work->addr,
				       switchdev_work->vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, switchdev_work->addr,
				switchdev_work->vid, err);
		}

		break;
	}
	rtnl_unlock();

	kfree(switchdev_work);
	/* Drop the reference taken in dsa_slave_switchdev_event() */
	if (dsa_is_user_port(ds, dp->index))
		dev_put(dp->slave);
}
2136 
2137 static int dsa_lower_dev_walk(struct net_device *lower_dev,
2138 			      struct netdev_nested_priv *priv)
2139 {
2140 	if (dsa_slave_dev_check(lower_dev)) {
2141 		priv->data = (void *)netdev_priv(lower_dev);
2142 		return 1;
2143 	}
2144 
2145 	return 0;
2146 }
2147 
2148 static struct dsa_slave_priv *dsa_slave_dev_lower_find(struct net_device *dev)
2149 {
2150 	struct netdev_nested_priv priv = {
2151 		.data = NULL,
2152 	};
2153 
2154 	netdev_walk_all_lower_dev_rcu(dev, dsa_lower_dev_walk, &priv);
2155 
2156 	return (struct dsa_slave_priv *)priv.data;
2157 }
2158 
/* Called under rcu_read_lock() */
/* Atomic switchdev notifier: handles port attribute sets inline and
 * schedules FDB add/del work to run in process context. Takes a reference
 * on the slave netdev which the worker drops.
 */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	const struct switchdev_notifier_fdb_info *fdb_info;
	struct dsa_switchdev_event_work *switchdev_work;
	struct dsa_port *dp;
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = ptr;

		if (dsa_slave_dev_check(dev)) {
			/* Only user-configured entries are offloaded here */
			if (!fdb_info->added_by_user)
				return NOTIFY_OK;

			dp = dsa_slave_to_port(dev);
		} else {
			/* Snoop addresses learnt on foreign interfaces
			 * bridged with us, for switches that don't
			 * automatically learn SA from CPU-injected traffic
			 */
			struct net_device *br_dev;
			struct dsa_slave_priv *p;

			br_dev = netdev_master_upper_dev_get_rcu(dev);
			if (!br_dev)
				return NOTIFY_DONE;

			if (!netif_is_bridge_master(br_dev))
				return NOTIFY_DONE;

			p = dsa_slave_dev_lower_find(br_dev);
			if (!p)
				return NOTIFY_DONE;

			dp = p->dp->cpu_dp;

			if (!dp->ds->assisted_learning_on_cpu_port)
				return NOTIFY_DONE;
		}

		if (!dp->ds->ops->port_fdb_add || !dp->ds->ops->port_fdb_del)
			return NOTIFY_DONE;

		/* Atomic context: defer the actual programming to a worker */
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		INIT_WORK(&switchdev_work->work,
			  dsa_slave_switchdev_event_work);
		switchdev_work->ds = dp->ds;
		switchdev_work->port = dp->index;
		switchdev_work->event = event;

		ether_addr_copy(switchdev_work->addr,
				fdb_info->addr);
		switchdev_work->vid = fdb_info->vid;

		/* Hold a reference on the slave for dsa_fdb_offload_notify */
		if (dsa_is_user_port(dp->ds, dp->index))
			dev_hold(dev);
		dsa_schedule_work(&switchdev_work->work);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
2237 
2238 static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
2239 					      unsigned long event, void *ptr)
2240 {
2241 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2242 	int err;
2243 
2244 	switch (event) {
2245 	case SWITCHDEV_PORT_OBJ_ADD:
2246 		err = switchdev_handle_port_obj_add(dev, ptr,
2247 						    dsa_slave_dev_check,
2248 						    dsa_slave_port_obj_add);
2249 		return notifier_from_errno(err);
2250 	case SWITCHDEV_PORT_OBJ_DEL:
2251 		err = switchdev_handle_port_obj_del(dev, ptr,
2252 						    dsa_slave_dev_check,
2253 						    dsa_slave_port_obj_del);
2254 		return notifier_from_errno(err);
2255 	case SWITCHDEV_PORT_ATTR_SET:
2256 		err = switchdev_handle_port_attr_set(dev, ptr,
2257 						     dsa_slave_dev_check,
2258 						     dsa_slave_port_attr_set);
2259 		return notifier_from_errno(err);
2260 	}
2261 
2262 	return NOTIFY_DONE;
2263 }
2264 
/* Netdevice events (link changes, upper device linking, etc.) */
static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call  = dsa_slave_netdevice_event,
};

/* Atomic switchdev events (FDB add/del, attr set) */
static struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call = dsa_slave_switchdev_event,
};

/* Blocking switchdev events (port object add/del, attr set) */
static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
	.notifier_call = dsa_slave_switchdev_blocking_event,
};
2276 
2277 int dsa_slave_register_notifier(void)
2278 {
2279 	struct notifier_block *nb;
2280 	int err;
2281 
2282 	err = register_netdevice_notifier(&dsa_slave_nb);
2283 	if (err)
2284 		return err;
2285 
2286 	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
2287 	if (err)
2288 		goto err_switchdev_nb;
2289 
2290 	nb = &dsa_slave_switchdev_blocking_notifier;
2291 	err = register_switchdev_blocking_notifier(nb);
2292 	if (err)
2293 		goto err_switchdev_blocking_nb;
2294 
2295 	return 0;
2296 
2297 err_switchdev_blocking_nb:
2298 	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
2299 err_switchdev_nb:
2300 	unregister_netdevice_notifier(&dsa_slave_nb);
2301 	return err;
2302 }
2303 
2304 void dsa_slave_unregister_notifier(void)
2305 {
2306 	struct notifier_block *nb;
2307 	int err;
2308 
2309 	nb = &dsa_slave_switchdev_blocking_notifier;
2310 	err = unregister_switchdev_blocking_notifier(nb);
2311 	if (err)
2312 		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
2313 
2314 	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
2315 	if (err)
2316 		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
2317 
2318 	err = unregister_netdevice_notifier(&dsa_slave_nb);
2319 	if (err)
2320 		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
2321 }
2322