1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/dsa/slave.c - Slave device handling
4  * Copyright (c) 2008-2009 Marvell Semiconductor
5  */
6 
7 #include <linux/list.h>
8 #include <linux/etherdevice.h>
9 #include <linux/netdevice.h>
10 #include <linux/phy.h>
11 #include <linux/phy_fixed.h>
12 #include <linux/phylink.h>
13 #include <linux/of_net.h>
14 #include <linux/of_mdio.h>
15 #include <linux/mdio.h>
16 #include <net/rtnetlink.h>
17 #include <net/pkt_cls.h>
18 #include <net/tc_act/tc_mirred.h>
19 #include <linux/if_bridge.h>
20 #include <linux/if_hsr.h>
21 #include <linux/netpoll.h>
22 #include <linux/ptp_classify.h>
23 
24 #include "dsa_priv.h"
25 
26 /* slave mii_bus handling ***************************************************/
27 static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
28 {
29 	struct dsa_switch *ds = bus->priv;
30 
31 	if (ds->phys_mii_mask & (1 << addr))
32 		return ds->ops->phy_read(ds, addr, reg);
33 
34 	return 0xffff;
35 }
36 
37 static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
38 {
39 	struct dsa_switch *ds = bus->priv;
40 
41 	if (ds->phys_mii_mask & (1 << addr))
42 		return ds->ops->phy_write(ds, addr, reg, val);
43 
44 	return 0;
45 }
46 
47 void dsa_slave_mii_bus_init(struct dsa_switch *ds)
48 {
49 	ds->slave_mii_bus->priv = (void *)ds;
50 	ds->slave_mii_bus->name = "dsa slave smi";
51 	ds->slave_mii_bus->read = dsa_slave_phy_read;
52 	ds->slave_mii_bus->write = dsa_slave_phy_write;
53 	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
54 		 ds->dst->index, ds->index);
55 	ds->slave_mii_bus->parent = ds->dev;
56 	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
57 }
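
/* Example (illustrative): a switch driver exposing internal PHYs at MDIO
 * addresses 0-4 would set ds->phys_mii_mask = GENMASK(4, 0) before this
 * bus is registered. dsa_slave_phy_read() then forwards accesses at those
 * addresses to ds->ops->phy_read() and returns 0xffff ("no device") for
 * every other address, which the inverted phy_mask above also hides from
 * bus probing.
 */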
58 
59 
60 /* slave device handling ****************************************************/
61 static int dsa_slave_get_iflink(const struct net_device *dev)
62 {
63 	return dsa_slave_to_master(dev)->ifindex;
64 }
65 
66 static int dsa_slave_open(struct net_device *dev)
67 {
68 	struct net_device *master = dsa_slave_to_master(dev);
69 	struct dsa_port *dp = dsa_slave_to_port(dev);
70 	int err;
71 
72 	err = dev_open(master, NULL);
73 	if (err < 0) {
74 		netdev_err(dev, "failed to open master %s\n", master->name);
75 		goto out;
76 	}
77 
78 	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
79 		err = dev_uc_add(master, dev->dev_addr);
80 		if (err < 0)
81 			goto out;
82 	}
83 
84 	if (dev->flags & IFF_ALLMULTI) {
85 		err = dev_set_allmulti(master, 1);
86 		if (err < 0)
87 			goto del_unicast;
88 	}
89 	if (dev->flags & IFF_PROMISC) {
90 		err = dev_set_promiscuity(master, 1);
91 		if (err < 0)
92 			goto clear_allmulti;
93 	}
94 
95 	err = dsa_port_enable_rt(dp, dev->phydev);
96 	if (err)
97 		goto clear_promisc;
98 
99 	return 0;
100 
101 clear_promisc:
102 	if (dev->flags & IFF_PROMISC)
103 		dev_set_promiscuity(master, -1);
104 clear_allmulti:
105 	if (dev->flags & IFF_ALLMULTI)
106 		dev_set_allmulti(master, -1);
107 del_unicast:
108 	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
109 		dev_uc_del(master, dev->dev_addr);
110 out:
111 	return err;
112 }
113 
114 static int dsa_slave_close(struct net_device *dev)
115 {
116 	struct net_device *master = dsa_slave_to_master(dev);
117 	struct dsa_port *dp = dsa_slave_to_port(dev);
118 
119 	dsa_port_disable_rt(dp);
120 
121 	dev_mc_unsync(master, dev);
122 	dev_uc_unsync(master, dev);
123 	if (dev->flags & IFF_ALLMULTI)
124 		dev_set_allmulti(master, -1);
125 	if (dev->flags & IFF_PROMISC)
126 		dev_set_promiscuity(master, -1);
127 
128 	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
129 		dev_uc_del(master, dev->dev_addr);
130 
131 	return 0;
132 }
133 
134 static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
135 {
136 	struct net_device *master = dsa_slave_to_master(dev);
137 	if (dev->flags & IFF_UP) {
138 		if (change & IFF_ALLMULTI)
139 			dev_set_allmulti(master,
140 					 dev->flags & IFF_ALLMULTI ? 1 : -1);
141 		if (change & IFF_PROMISC)
142 			dev_set_promiscuity(master,
143 					    dev->flags & IFF_PROMISC ? 1 : -1);
144 	}
145 }
146 
147 static void dsa_slave_set_rx_mode(struct net_device *dev)
148 {
149 	struct net_device *master = dsa_slave_to_master(dev);
150 
151 	dev_mc_sync(master, dev);
152 	dev_uc_sync(master, dev);
153 }
154 
155 static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
156 {
157 	struct net_device *master = dsa_slave_to_master(dev);
158 	struct sockaddr *addr = a;
159 	int err;
160 
161 	if (!is_valid_ether_addr(addr->sa_data))
162 		return -EADDRNOTAVAIL;
163 
164 	if (!(dev->flags & IFF_UP))
165 		goto out;
166 
167 	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
168 		err = dev_uc_add(master, addr->sa_data);
169 		if (err < 0)
170 			return err;
171 	}
172 
173 	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
174 		dev_uc_del(master, dev->dev_addr);
175 
176 out:
177 	ether_addr_copy(dev->dev_addr, addr->sa_data);
178 
179 	return 0;
180 }
181 
182 struct dsa_slave_dump_ctx {
183 	struct net_device *dev;
184 	struct sk_buff *skb;
185 	struct netlink_callback *cb;
186 	int idx;
187 };
188 
189 static int
190 dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
191 			   bool is_static, void *data)
192 {
193 	struct dsa_slave_dump_ctx *dump = data;
194 	u32 portid = NETLINK_CB(dump->cb->skb).portid;
195 	u32 seq = dump->cb->nlh->nlmsg_seq;
196 	struct nlmsghdr *nlh;
197 	struct ndmsg *ndm;
198 
199 	if (dump->idx < dump->cb->args[2])
200 		goto skip;
201 
202 	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
203 			sizeof(*ndm), NLM_F_MULTI);
204 	if (!nlh)
205 		return -EMSGSIZE;
206 
207 	ndm = nlmsg_data(nlh);
208 	ndm->ndm_family  = AF_BRIDGE;
209 	ndm->ndm_pad1    = 0;
210 	ndm->ndm_pad2    = 0;
211 	ndm->ndm_flags   = NTF_SELF;
212 	ndm->ndm_type    = 0;
213 	ndm->ndm_ifindex = dump->dev->ifindex;
214 	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;
215 
216 	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
217 		goto nla_put_failure;
218 
219 	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
220 		goto nla_put_failure;
221 
222 	nlmsg_end(dump->skb, nlh);
223 
224 skip:
225 	dump->idx++;
226 	return 0;
227 
228 nla_put_failure:
229 	nlmsg_cancel(dump->skb, nlh);
230 	return -EMSGSIZE;
231 }
232 
233 static int
234 dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
235 		   struct net_device *dev, struct net_device *filter_dev,
236 		   int *idx)
237 {
238 	struct dsa_port *dp = dsa_slave_to_port(dev);
239 	struct dsa_slave_dump_ctx dump = {
240 		.dev = dev,
241 		.skb = skb,
242 		.cb = cb,
243 		.idx = *idx,
244 	};
245 	int err;
246 
247 	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
248 	*idx = dump.idx;
249 
250 	return err;
251 }
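
/* Example (hedged sketch, not from this file): a switch driver's
 * ->port_fdb_dump() simply walks its hardware FDB and invokes the
 * dsa_fdb_dump_cb_t callback (dsa_slave_port_fdb_do_dump above) once per
 * entry. foo_next_fdb_entry() and struct foo_fdb_entry are made up:
 *
 *	static int foo_port_fdb_dump(struct dsa_switch *ds, int port,
 *				     dsa_fdb_dump_cb_t *cb, void *data)
 *	{
 *		struct foo_fdb_entry ent;
 *		int err;
 *
 *		while (!foo_next_fdb_entry(ds, port, &ent)) {
 *			err = cb(ent.addr, ent.vid, ent.is_static, data);
 *			if (err)
 *				return err;
 *		}
 *
 *		return 0;
 *	}
 */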
252 
253 static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
254 {
255 	struct dsa_slave_priv *p = netdev_priv(dev);
256 	struct dsa_switch *ds = p->dp->ds;
257 	int port = p->dp->index;
258 
259 	/* Pass through to switch driver if it supports timestamping */
260 	switch (cmd) {
261 	case SIOCGHWTSTAMP:
262 		if (ds->ops->port_hwtstamp_get)
263 			return ds->ops->port_hwtstamp_get(ds, port, ifr);
264 		break;
265 	case SIOCSHWTSTAMP:
266 		if (ds->ops->port_hwtstamp_set)
267 			return ds->ops->port_hwtstamp_set(ds, port, ifr);
268 		break;
269 	}
270 
271 	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
272 }
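
/* Example (illustrative): with a driver that implements
 * ->port_hwtstamp_set(), the standard SIOCSHWTSTAMP flow from userspace
 * works unchanged on a DSA user port ("lan0" is a placeholder name):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_name = "lan0" };
 *
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */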
273 
274 static int dsa_slave_port_attr_set(struct net_device *dev,
275 				   const struct switchdev_attr *attr)
276 {
277 	struct dsa_port *dp = dsa_slave_to_port(dev);
278 	int ret;
279 
280 	if (!dsa_port_offloads_netdev(dp, attr->orig_dev))
281 		return -EOPNOTSUPP;
282 
283 	switch (attr->id) {
284 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
285 		ret = dsa_port_set_state(dp, attr->u.stp_state);
286 		break;
287 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
288 		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering);
289 		break;
290 	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
291 		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
292 		break;
293 	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
294 		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags);
295 		break;
296 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
297 		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags);
298 		break;
299 	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
300 		ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter);
301 		break;
302 	default:
303 		ret = -EOPNOTSUPP;
304 		break;
305 	}
306 
307 	return ret;
308 }
309 
310 /* Must be called under rcu_read_lock() */
311 static int
312 dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
313 				      const struct switchdev_obj_port_vlan *vlan)
314 {
315 	struct net_device *upper_dev;
316 	struct list_head *iter;
317 
318 	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
319 		u16 vid;
320 
321 		if (!is_vlan_dev(upper_dev))
322 			continue;
323 
324 		vid = vlan_dev_vlan_id(upper_dev);
325 		if (vid == vlan->vid)
326 			return -EBUSY;
327 	}
328 
329 	return 0;
330 }
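
/* Example: after "ip link add link lan0 name lan0.100 type vlan id 100",
 * a subsequent "bridge vlan add dev lan0 vid 100" is rejected with -EBUSY
 * by the check above, since both would program VID 100 on the same port
 * (interface names are placeholders).
 */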
331 
332 static int dsa_slave_vlan_add(struct net_device *dev,
333 			      const struct switchdev_obj *obj,
334 			      struct netlink_ext_ack *extack)
335 {
336 	struct net_device *master = dsa_slave_to_master(dev);
337 	struct dsa_port *dp = dsa_slave_to_port(dev);
338 	struct switchdev_obj_port_vlan vlan;
339 	int err;
340 
341 	if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
342 		return -EOPNOTSUPP;
343 
344 	if (dsa_port_skip_vlan_configuration(dp)) {
345 		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
346 		return 0;
347 	}
348 
349 	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
350 
351 	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
352 	 * the same VID.
353 	 */
354 	if (br_vlan_enabled(dp->bridge_dev)) {
355 		rcu_read_lock();
356 		err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan);
357 		rcu_read_unlock();
358 		if (err)
359 			return err;
360 	}
361 
362 	err = dsa_port_vlan_add(dp, &vlan);
363 	if (err)
364 		return err;
365 
366 	/* We need the dedicated CPU port to be a member of the VLAN as well.
367 	 * Even though drivers often handle CPU membership in special ways,
368 	 * it doesn't make sense to program a PVID, so clear this flag.
369 	 */
370 	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;
371 
372 	err = dsa_port_vlan_add(dp->cpu_dp, &vlan);
373 	if (err)
374 		return err;
375 
376 	return vlan_vid_add(master, htons(ETH_P_8021Q), vlan.vid);
377 }
378 
379 static int dsa_slave_port_obj_add(struct net_device *dev,
380 				  const struct switchdev_obj *obj,
381 				  struct netlink_ext_ack *extack)
382 {
383 	struct dsa_port *dp = dsa_slave_to_port(dev);
384 	int err;
385 
386 	switch (obj->id) {
387 	case SWITCHDEV_OBJ_ID_PORT_MDB:
388 		if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
389 			return -EOPNOTSUPP;
390 		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
391 		break;
392 	case SWITCHDEV_OBJ_ID_HOST_MDB:
393 		/* DSA can directly translate this to a normal MDB add,
394 		 * but on the CPU port.
395 		 */
396 		err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
397 		break;
398 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
399 		err = dsa_slave_vlan_add(dev, obj, extack);
400 		break;
401 	default:
402 		err = -EOPNOTSUPP;
403 		break;
404 	}
405 
406 	return err;
407 }
408 
409 static int dsa_slave_vlan_del(struct net_device *dev,
410 			      const struct switchdev_obj *obj)
411 {
412 	struct net_device *master = dsa_slave_to_master(dev);
413 	struct dsa_port *dp = dsa_slave_to_port(dev);
414 	struct switchdev_obj_port_vlan *vlan;
415 	int err;
416 
417 	if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
418 		return -EOPNOTSUPP;
419 
420 	if (dsa_port_skip_vlan_configuration(dp))
421 		return 0;
422 
423 	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
424 
425 	/* Do not deprogram the CPU port as it may be shared with other user
426 	 * ports which can be members of this VLAN as well.
427 	 */
428 	err = dsa_port_vlan_del(dp, vlan);
429 	if (err)
430 		return err;
431 
432 	vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid);
433 
434 	return 0;
435 }
436 
437 static int dsa_slave_port_obj_del(struct net_device *dev,
438 				  const struct switchdev_obj *obj)
439 {
440 	struct dsa_port *dp = dsa_slave_to_port(dev);
441 	int err;
442 
443 	switch (obj->id) {
444 	case SWITCHDEV_OBJ_ID_PORT_MDB:
445 		if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
446 			return -EOPNOTSUPP;
447 		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
448 		break;
449 	case SWITCHDEV_OBJ_ID_HOST_MDB:
450 		/* DSA can directly translate this to a normal MDB delete,
451 		 * but on the CPU port.
452 		 */
453 		err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
454 		break;
455 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
456 		err = dsa_slave_vlan_del(dev, obj);
457 		break;
458 	default:
459 		err = -EOPNOTSUPP;
460 		break;
461 	}
462 
463 	return err;
464 }
465 
466 static int dsa_slave_get_port_parent_id(struct net_device *dev,
467 					struct netdev_phys_item_id *ppid)
468 {
469 	struct dsa_port *dp = dsa_slave_to_port(dev);
470 	struct dsa_switch *ds = dp->ds;
471 	struct dsa_switch_tree *dst = ds->dst;
472 
473 	/* For non-legacy ports, devlink is used and it takes
474 	 * care of reporting the port parent ID. This ndo implementation
475 	 * should be removed with legacy support.
476 	 */
477 	if (dp->ds->devlink)
478 		return -EOPNOTSUPP;
479 
480 	ppid->id_len = sizeof(dst->index);
481 	memcpy(&ppid->id, &dst->index, ppid->id_len);
482 
483 	return 0;
484 }
485 
486 static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
487 						     struct sk_buff *skb)
488 {
489 #ifdef CONFIG_NET_POLL_CONTROLLER
490 	struct dsa_slave_priv *p = netdev_priv(dev);
491 
492 	return netpoll_send_skb(p->netpoll, skb);
493 #else
494 	BUG();
495 	return NETDEV_TX_OK;
496 #endif
497 }
498 
499 static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
500 				 struct sk_buff *skb)
501 {
502 	struct dsa_switch *ds = p->dp->ds;
503 	struct sk_buff *clone;
504 	unsigned int type;
505 
506 	type = ptp_classify_raw(skb);
507 	if (type == PTP_CLASS_NONE)
508 		return;
509 
510 	if (!ds->ops->port_txtstamp)
511 		return;
512 
513 	clone = skb_clone_sk(skb);
514 	if (!clone)
515 		return;
516 
517 	if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type)) {
518 		DSA_SKB_CB(skb)->clone = clone;
519 		return;
520 	}
521 
522 	kfree_skb(clone);
523 }
524 
525 netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
526 {
527 	/* SKBs for netpoll still need to be mangled with the protocol-specific
528 	 * tag to be successfully transmitted
529 	 */
530 	if (unlikely(netpoll_tx_running(dev)))
531 		return dsa_slave_netpoll_send_skb(dev, skb);
532 
533 	/* Queue the SKB for transmission on the parent interface, but
534 	 * do not modify its EtherType
535 	 */
536 	skb->dev = dsa_slave_to_master(dev);
537 	dev_queue_xmit(skb);
538 
539 	return NETDEV_TX_OK;
540 }
541 EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
542 
543 static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
544 {
545 	int needed_headroom = dev->needed_headroom;
546 	int needed_tailroom = dev->needed_tailroom;
547 
548 	/* For tail taggers, we need to pad short frames ourselves, otherwise
549 	 * the master interface would add its padding after the tail tag and
550 	 * the tag would no longer sit at the end of the packet. Account for
551 	 * that pad length here, and pad later.
552 	 */
553 	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
554 		needed_tailroom += ETH_ZLEN - skb->len;
555 	/* skb_headroom()/skb_tailroom() return unsigned int, so clamp at 0 */
556 	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
557 	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
558 
559 	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
560 		/* No reallocation needed, yay! */
561 		return 0;
562 
563 	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
564 				GFP_ATOMIC);
565 }
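
/* Worked example: a tail tagger with dev->needed_tailroom = 8 that sends
 * a 40-byte frame must reserve 8 + (ETH_ZLEN - 40) = 28 bytes of
 * tailroom, so that after padding to the 60-byte minimum the tag still
 * ends up at the very end of the frame.
 */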
566 
567 static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
568 {
569 	struct dsa_slave_priv *p = netdev_priv(dev);
570 	struct sk_buff *nskb;
571 
572 	dev_sw_netstats_tx_add(dev, 1, skb->len);
573 
574 	DSA_SKB_CB(skb)->clone = NULL;
575 
576 	/* Identify PTP protocol packets, clone them, and pass them to the
577 	 * switch driver
578 	 */
579 	dsa_skb_tx_timestamp(p, skb);
580 
581 	if (dsa_realloc_skb(skb, dev)) {
582 		dev_kfree_skb_any(skb);
583 		return NETDEV_TX_OK;
584 	}
585 
586 	/* needed_tailroom should still be 'warm' in the cache line from
587 	 * dsa_realloc_skb(), which has also ensured that padding is safe.
588 	 */
589 	if (dev->needed_tailroom)
590 		eth_skb_pad(skb);
591 
592 	/* Transmit function may have to reallocate the original SKB,
593 	 * in which case it must have freed it. Only free it here on error.
594 	 */
595 	nskb = p->xmit(skb, dev);
596 	if (!nskb) {
597 		kfree_skb(skb);
598 		return NETDEV_TX_OK;
599 	}
600 
601 	return dsa_enqueue_skb(nskb, dev);
602 }
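
/* Example (hedged sketch of a tagging protocol's ->xmit(), not from this
 * file): a head tagger pushes its tag into the headroom that
 * dsa_realloc_skb() guaranteed above; the 4-byte layout is made up:
 *
 *	static struct sk_buff *foo_tag_xmit(struct sk_buff *skb,
 *					    struct net_device *dev)
 *	{
 *		struct dsa_port *dp = dsa_slave_to_port(dev);
 *		u8 *tag = skb_push(skb, 4);
 *
 *		tag[0] = 0;		(hypothetical "from CPU" opcode)
 *		tag[1] = dp->index;	(destination switch port)
 *		tag[2] = 0;
 *		tag[3] = 0;
 *
 *		return skb;
 *	}
 */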
603 
604 /* ethtool operations *******************************************************/
605 
606 static void dsa_slave_get_drvinfo(struct net_device *dev,
607 				  struct ethtool_drvinfo *drvinfo)
608 {
609 	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
610 	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
611 	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
612 }
613 
614 static int dsa_slave_get_regs_len(struct net_device *dev)
615 {
616 	struct dsa_port *dp = dsa_slave_to_port(dev);
617 	struct dsa_switch *ds = dp->ds;
618 
619 	if (ds->ops->get_regs_len)
620 		return ds->ops->get_regs_len(ds, dp->index);
621 
622 	return -EOPNOTSUPP;
623 }
624 
625 static void
626 dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
627 {
628 	struct dsa_port *dp = dsa_slave_to_port(dev);
629 	struct dsa_switch *ds = dp->ds;
630 
631 	if (ds->ops->get_regs)
632 		ds->ops->get_regs(ds, dp->index, regs, _p);
633 }
634 
635 static int dsa_slave_nway_reset(struct net_device *dev)
636 {
637 	struct dsa_port *dp = dsa_slave_to_port(dev);
638 
639 	return phylink_ethtool_nway_reset(dp->pl);
640 }
641 
642 static int dsa_slave_get_eeprom_len(struct net_device *dev)
643 {
644 	struct dsa_port *dp = dsa_slave_to_port(dev);
645 	struct dsa_switch *ds = dp->ds;
646 
647 	if (ds->cd && ds->cd->eeprom_len)
648 		return ds->cd->eeprom_len;
649 
650 	if (ds->ops->get_eeprom_len)
651 		return ds->ops->get_eeprom_len(ds);
652 
653 	return 0;
654 }
655 
656 static int dsa_slave_get_eeprom(struct net_device *dev,
657 				struct ethtool_eeprom *eeprom, u8 *data)
658 {
659 	struct dsa_port *dp = dsa_slave_to_port(dev);
660 	struct dsa_switch *ds = dp->ds;
661 
662 	if (ds->ops->get_eeprom)
663 		return ds->ops->get_eeprom(ds, eeprom, data);
664 
665 	return -EOPNOTSUPP;
666 }
667 
668 static int dsa_slave_set_eeprom(struct net_device *dev,
669 				struct ethtool_eeprom *eeprom, u8 *data)
670 {
671 	struct dsa_port *dp = dsa_slave_to_port(dev);
672 	struct dsa_switch *ds = dp->ds;
673 
674 	if (ds->ops->set_eeprom)
675 		return ds->ops->set_eeprom(ds, eeprom, data);
676 
677 	return -EOPNOTSUPP;
678 }
679 
680 static void dsa_slave_get_strings(struct net_device *dev,
681 				  uint32_t stringset, uint8_t *data)
682 {
683 	struct dsa_port *dp = dsa_slave_to_port(dev);
684 	struct dsa_switch *ds = dp->ds;
685 
686 	if (stringset == ETH_SS_STATS) {
687 		int len = ETH_GSTRING_LEN;
688 
689 		strncpy(data, "tx_packets", len);
690 		strncpy(data + len, "tx_bytes", len);
691 		strncpy(data + 2 * len, "rx_packets", len);
692 		strncpy(data + 3 * len, "rx_bytes", len);
693 		if (ds->ops->get_strings)
694 			ds->ops->get_strings(ds, dp->index, stringset,
695 					     data + 4 * len);
696 	}
697 }
698 
699 static void dsa_slave_get_ethtool_stats(struct net_device *dev,
700 					struct ethtool_stats *stats,
701 					uint64_t *data)
702 {
703 	struct dsa_port *dp = dsa_slave_to_port(dev);
704 	struct dsa_switch *ds = dp->ds;
705 	struct pcpu_sw_netstats *s;
706 	unsigned int start;
707 	int i;
708 
709 	for_each_possible_cpu(i) {
710 		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
711 
712 		s = per_cpu_ptr(dev->tstats, i);
713 		do {
714 			start = u64_stats_fetch_begin_irq(&s->syncp);
715 			tx_packets = s->tx_packets;
716 			tx_bytes = s->tx_bytes;
717 			rx_packets = s->rx_packets;
718 			rx_bytes = s->rx_bytes;
719 		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
720 		data[0] += tx_packets;
721 		data[1] += tx_bytes;
722 		data[2] += rx_packets;
723 		data[3] += rx_bytes;
724 	}
725 	if (ds->ops->get_ethtool_stats)
726 		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
727 }
728 
729 static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
730 {
731 	struct dsa_port *dp = dsa_slave_to_port(dev);
732 	struct dsa_switch *ds = dp->ds;
733 
734 	if (sset == ETH_SS_STATS) {
735 		int count;
736 
737 		count = 4;
738 		if (ds->ops->get_sset_count)
739 			count += ds->ops->get_sset_count(ds, dp->index, sset);
740 
741 		return count;
742 	}
743 
744 	return -EOPNOTSUPP;
745 }
746 
747 static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
748 {
749 	struct dsa_port *dp = dsa_slave_to_port(dev);
750 	struct dsa_switch *ds = dp->ds;
751 
752 	phylink_ethtool_get_wol(dp->pl, w);
753 
754 	if (ds->ops->get_wol)
755 		ds->ops->get_wol(ds, dp->index, w);
756 }
757 
758 static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
759 {
760 	struct dsa_port *dp = dsa_slave_to_port(dev);
761 	struct dsa_switch *ds = dp->ds;
762 	int ret = -EOPNOTSUPP;
763 
764 	phylink_ethtool_set_wol(dp->pl, w);
765 
766 	if (ds->ops->set_wol)
767 		ret = ds->ops->set_wol(ds, dp->index, w);
768 
769 	return ret;
770 }
771 
772 static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
773 {
774 	struct dsa_port *dp = dsa_slave_to_port(dev);
775 	struct dsa_switch *ds = dp->ds;
776 	int ret;
777 
778 	/* Port's PHY and MAC both need to be EEE capable */
779 	if (!dev->phydev || !dp->pl)
780 		return -ENODEV;
781 
782 	if (!ds->ops->set_mac_eee)
783 		return -EOPNOTSUPP;
784 
785 	ret = ds->ops->set_mac_eee(ds, dp->index, e);
786 	if (ret)
787 		return ret;
788 
789 	return phylink_ethtool_set_eee(dp->pl, e);
790 }
791 
792 static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
793 {
794 	struct dsa_port *dp = dsa_slave_to_port(dev);
795 	struct dsa_switch *ds = dp->ds;
796 	int ret;
797 
798 	/* Port's PHY and MAC both need to be EEE capable */
799 	if (!dev->phydev || !dp->pl)
800 		return -ENODEV;
801 
802 	if (!ds->ops->get_mac_eee)
803 		return -EOPNOTSUPP;
804 
805 	ret = ds->ops->get_mac_eee(ds, dp->index, e);
806 	if (ret)
807 		return ret;
808 
809 	return phylink_ethtool_get_eee(dp->pl, e);
810 }
811 
812 static int dsa_slave_get_link_ksettings(struct net_device *dev,
813 					struct ethtool_link_ksettings *cmd)
814 {
815 	struct dsa_port *dp = dsa_slave_to_port(dev);
816 
817 	return phylink_ethtool_ksettings_get(dp->pl, cmd);
818 }
819 
820 static int dsa_slave_set_link_ksettings(struct net_device *dev,
821 					const struct ethtool_link_ksettings *cmd)
822 {
823 	struct dsa_port *dp = dsa_slave_to_port(dev);
824 
825 	return phylink_ethtool_ksettings_set(dp->pl, cmd);
826 }
827 
828 static void dsa_slave_get_pauseparam(struct net_device *dev,
829 				     struct ethtool_pauseparam *pause)
830 {
831 	struct dsa_port *dp = dsa_slave_to_port(dev);
832 
833 	phylink_ethtool_get_pauseparam(dp->pl, pause);
834 }
835 
836 static int dsa_slave_set_pauseparam(struct net_device *dev,
837 				    struct ethtool_pauseparam *pause)
838 {
839 	struct dsa_port *dp = dsa_slave_to_port(dev);
840 
841 	return phylink_ethtool_set_pauseparam(dp->pl, pause);
842 }
843 
844 #ifdef CONFIG_NET_POLL_CONTROLLER
845 static int dsa_slave_netpoll_setup(struct net_device *dev,
846 				   struct netpoll_info *ni)
847 {
848 	struct net_device *master = dsa_slave_to_master(dev);
849 	struct dsa_slave_priv *p = netdev_priv(dev);
850 	struct netpoll *netpoll;
851 	int err = 0;
852 
853 	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
854 	if (!netpoll)
855 		return -ENOMEM;
856 
857 	err = __netpoll_setup(netpoll, master);
858 	if (err) {
859 		kfree(netpoll);
860 		goto out;
861 	}
862 
863 	p->netpoll = netpoll;
864 out:
865 	return err;
866 }
867 
868 static void dsa_slave_netpoll_cleanup(struct net_device *dev)
869 {
870 	struct dsa_slave_priv *p = netdev_priv(dev);
871 	struct netpoll *netpoll = p->netpoll;
872 
873 	if (!netpoll)
874 		return;
875 
876 	p->netpoll = NULL;
877 
878 	__netpoll_free(netpoll);
879 }
880 
881 static void dsa_slave_poll_controller(struct net_device *dev)
882 {
883 }
884 #endif
885 
886 static int dsa_slave_get_phys_port_name(struct net_device *dev,
887 					char *name, size_t len)
888 {
889 	struct dsa_port *dp = dsa_slave_to_port(dev);
890 
891 	/* For non-legacy ports, devlink is used and it takes
892 	 * care of the name generation. This ndo implementation
893 	 * should be removed with legacy support.
894 	 */
895 	if (dp->ds->devlink)
896 		return -EOPNOTSUPP;
897 
898 	if (snprintf(name, len, "p%d", dp->index) >= len)
899 		return -EINVAL;
900 
901 	return 0;
902 }
903 
904 static struct dsa_mall_tc_entry *
905 dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
906 {
907 	struct dsa_slave_priv *p = netdev_priv(dev);
908 	struct dsa_mall_tc_entry *mall_tc_entry;
909 
910 	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
911 		if (mall_tc_entry->cookie == cookie)
912 			return mall_tc_entry;
913 
914 	return NULL;
915 }
916 
917 static int
918 dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
919 				  struct tc_cls_matchall_offload *cls,
920 				  bool ingress)
921 {
922 	struct dsa_port *dp = dsa_slave_to_port(dev);
923 	struct dsa_slave_priv *p = netdev_priv(dev);
924 	struct dsa_mall_mirror_tc_entry *mirror;
925 	struct dsa_mall_tc_entry *mall_tc_entry;
926 	struct dsa_switch *ds = dp->ds;
927 	struct flow_action_entry *act;
928 	struct dsa_port *to_dp;
929 	int err;
930 
931 	if (!ds->ops->port_mirror_add)
932 		return -EOPNOTSUPP;
933 
934 	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
935 					      cls->common.extack))
936 		return -EOPNOTSUPP;
937 
938 	act = &cls->rule->action.entries[0];
939 
940 	if (!act->dev)
941 		return -EINVAL;
942 
943 	if (!dsa_slave_dev_check(act->dev))
944 		return -EOPNOTSUPP;
945 
946 	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
947 	if (!mall_tc_entry)
948 		return -ENOMEM;
949 
950 	mall_tc_entry->cookie = cls->cookie;
951 	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
952 	mirror = &mall_tc_entry->mirror;
953 
954 	to_dp = dsa_slave_to_port(act->dev);
955 
956 	mirror->to_local_port = to_dp->index;
957 	mirror->ingress = ingress;
958 
959 	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
960 	if (err) {
961 		kfree(mall_tc_entry);
962 		return err;
963 	}
964 
965 	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
966 
967 	return err;
968 }
969 
970 static int
971 dsa_slave_add_cls_matchall_police(struct net_device *dev,
972 				  struct tc_cls_matchall_offload *cls,
973 				  bool ingress)
974 {
975 	struct netlink_ext_ack *extack = cls->common.extack;
976 	struct dsa_port *dp = dsa_slave_to_port(dev);
977 	struct dsa_slave_priv *p = netdev_priv(dev);
978 	struct dsa_mall_policer_tc_entry *policer;
979 	struct dsa_mall_tc_entry *mall_tc_entry;
980 	struct dsa_switch *ds = dp->ds;
981 	struct flow_action_entry *act;
982 	int err;
983 
984 	if (!ds->ops->port_policer_add) {
985 		NL_SET_ERR_MSG_MOD(extack,
986 				   "Policing offload not implemented");
987 		return -EOPNOTSUPP;
988 	}
989 
990 	if (!ingress) {
991 		NL_SET_ERR_MSG_MOD(extack,
992 				   "Only supported on ingress qdisc");
993 		return -EOPNOTSUPP;
994 	}
995 
996 	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
997 					      cls->common.extack))
998 		return -EOPNOTSUPP;
999 
1000 	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
1001 		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
1002 			NL_SET_ERR_MSG_MOD(extack,
1003 					   "Only one port policer allowed");
1004 			return -EEXIST;
1005 		}
1006 	}
1007 
1008 	act = &cls->rule->action.entries[0];
1009 
1010 	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1011 	if (!mall_tc_entry)
1012 		return -ENOMEM;
1013 
1014 	mall_tc_entry->cookie = cls->cookie;
1015 	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
1016 	policer = &mall_tc_entry->policer;
1017 	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
1018 	policer->burst = act->police.burst;
1019 
1020 	err = ds->ops->port_policer_add(ds, dp->index, policer);
1021 	if (err) {
1022 		kfree(mall_tc_entry);
1023 		return err;
1024 	}
1025 
1026 	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
1027 
1028 	return err;
1029 }
1030 
1031 static int dsa_slave_add_cls_matchall(struct net_device *dev,
1032 				      struct tc_cls_matchall_offload *cls,
1033 				      bool ingress)
1034 {
1035 	int err = -EOPNOTSUPP;
1036 
1037 	if (cls->common.protocol == htons(ETH_P_ALL) &&
1038 	    flow_offload_has_one_action(&cls->rule->action) &&
1039 	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
1040 		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
1041 	else if (flow_offload_has_one_action(&cls->rule->action) &&
1042 		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
1043 		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);
1044 
1045 	return err;
1046 }
1047 
1048 static void dsa_slave_del_cls_matchall(struct net_device *dev,
1049 				       struct tc_cls_matchall_offload *cls)
1050 {
1051 	struct dsa_port *dp = dsa_slave_to_port(dev);
1052 	struct dsa_mall_tc_entry *mall_tc_entry;
1053 	struct dsa_switch *ds = dp->ds;
1054 
1055 	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
1056 	if (!mall_tc_entry)
1057 		return;
1058 
1059 	list_del(&mall_tc_entry->list);
1060 
1061 	switch (mall_tc_entry->type) {
1062 	case DSA_PORT_MALL_MIRROR:
1063 		if (ds->ops->port_mirror_del)
1064 			ds->ops->port_mirror_del(ds, dp->index,
1065 						 &mall_tc_entry->mirror);
1066 		break;
1067 	case DSA_PORT_MALL_POLICER:
1068 		if (ds->ops->port_policer_del)
1069 			ds->ops->port_policer_del(ds, dp->index);
1070 		break;
1071 	default:
1072 		WARN_ON(1);
1073 	}
1074 
1075 	kfree(mall_tc_entry);
1076 }
1077 
1078 static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
1079 					   struct tc_cls_matchall_offload *cls,
1080 					   bool ingress)
1081 {
1082 	if (cls->common.chain_index)
1083 		return -EOPNOTSUPP;
1084 
1085 	switch (cls->command) {
1086 	case TC_CLSMATCHALL_REPLACE:
1087 		return dsa_slave_add_cls_matchall(dev, cls, ingress);
1088 	case TC_CLSMATCHALL_DESTROY:
1089 		dsa_slave_del_cls_matchall(dev, cls);
1090 		return 0;
1091 	default:
1092 		return -EOPNOTSUPP;
1093 	}
1094 }
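
/* Example (illustrative): the two matchall offloads handled above map to
 * commands like the following ("lan0"/"lan1" are placeholders):
 *
 *	tc qdisc add dev lan0 clsact
 *	tc filter add dev lan0 ingress matchall skip_sw \
 *		action mirred egress mirror dev lan1
 *	tc filter add dev lan0 ingress matchall skip_sw \
 *		action police rate 100mbit burst 16k
 */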
1095 
1096 static int dsa_slave_add_cls_flower(struct net_device *dev,
1097 				    struct flow_cls_offload *cls,
1098 				    bool ingress)
1099 {
1100 	struct dsa_port *dp = dsa_slave_to_port(dev);
1101 	struct dsa_switch *ds = dp->ds;
1102 	int port = dp->index;
1103 
1104 	if (!ds->ops->cls_flower_add)
1105 		return -EOPNOTSUPP;
1106 
1107 	return ds->ops->cls_flower_add(ds, port, cls, ingress);
1108 }
1109 
1110 static int dsa_slave_del_cls_flower(struct net_device *dev,
1111 				    struct flow_cls_offload *cls,
1112 				    bool ingress)
1113 {
1114 	struct dsa_port *dp = dsa_slave_to_port(dev);
1115 	struct dsa_switch *ds = dp->ds;
1116 	int port = dp->index;
1117 
1118 	if (!ds->ops->cls_flower_del)
1119 		return -EOPNOTSUPP;
1120 
1121 	return ds->ops->cls_flower_del(ds, port, cls, ingress);
1122 }
1123 
1124 static int dsa_slave_stats_cls_flower(struct net_device *dev,
1125 				      struct flow_cls_offload *cls,
1126 				      bool ingress)
1127 {
1128 	struct dsa_port *dp = dsa_slave_to_port(dev);
1129 	struct dsa_switch *ds = dp->ds;
1130 	int port = dp->index;
1131 
1132 	if (!ds->ops->cls_flower_stats)
1133 		return -EOPNOTSUPP;
1134 
1135 	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
1136 }
1137 
1138 static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
1139 					 struct flow_cls_offload *cls,
1140 					 bool ingress)
1141 {
1142 	switch (cls->command) {
1143 	case FLOW_CLS_REPLACE:
1144 		return dsa_slave_add_cls_flower(dev, cls, ingress);
1145 	case FLOW_CLS_DESTROY:
1146 		return dsa_slave_del_cls_flower(dev, cls, ingress);
1147 	case FLOW_CLS_STATS:
1148 		return dsa_slave_stats_cls_flower(dev, cls, ingress);
1149 	default:
1150 		return -EOPNOTSUPP;
1151 	}
1152 }
1153 
1154 static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1155 				       void *cb_priv, bool ingress)
1156 {
1157 	struct net_device *dev = cb_priv;
1158 
1159 	if (!tc_can_offload(dev))
1160 		return -EOPNOTSUPP;
1161 
1162 	switch (type) {
1163 	case TC_SETUP_CLSMATCHALL:
1164 		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
1165 	case TC_SETUP_CLSFLOWER:
1166 		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
1167 	default:
1168 		return -EOPNOTSUPP;
1169 	}
1170 }
1171 
1172 static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
1173 					  void *type_data, void *cb_priv)
1174 {
1175 	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
1176 }
1177 
1178 static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
1179 					  void *type_data, void *cb_priv)
1180 {
1181 	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
1182 }
1183 
1184 static LIST_HEAD(dsa_slave_block_cb_list);
1185 
1186 static int dsa_slave_setup_tc_block(struct net_device *dev,
1187 				    struct flow_block_offload *f)
1188 {
1189 	struct flow_block_cb *block_cb;
1190 	flow_setup_cb_t *cb;
1191 
1192 	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1193 		cb = dsa_slave_setup_tc_block_cb_ig;
1194 	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1195 		cb = dsa_slave_setup_tc_block_cb_eg;
1196 	else
1197 		return -EOPNOTSUPP;
1198 
1199 	f->driver_block_list = &dsa_slave_block_cb_list;
1200 
1201 	switch (f->command) {
1202 	case FLOW_BLOCK_BIND:
1203 		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
1204 			return -EBUSY;
1205 
1206 		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
1207 		if (IS_ERR(block_cb))
1208 			return PTR_ERR(block_cb);
1209 
1210 		flow_block_cb_add(block_cb, f);
1211 		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
1212 		return 0;
1213 	case FLOW_BLOCK_UNBIND:
1214 		block_cb = flow_block_cb_lookup(f->block, cb, dev);
1215 		if (!block_cb)
1216 			return -ENOENT;
1217 
1218 		flow_block_cb_remove(block_cb, f);
1219 		list_del(&block_cb->driver_list);
1220 		return 0;
1221 	default:
1222 		return -EOPNOTSUPP;
1223 	}
1224 }
1225 
1226 static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
1227 			      void *type_data)
1228 {
1229 	struct dsa_port *dp = dsa_slave_to_port(dev);
1230 	struct dsa_switch *ds = dp->ds;
1231 
1232 	if (type == TC_SETUP_BLOCK)
1233 		return dsa_slave_setup_tc_block(dev, type_data);
1234 
1235 	if (!ds->ops->port_setup_tc)
1236 		return -EOPNOTSUPP;
1237 
1238 	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
1239 }
1240 
1241 static int dsa_slave_get_rxnfc(struct net_device *dev,
1242 			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
1243 {
1244 	struct dsa_port *dp = dsa_slave_to_port(dev);
1245 	struct dsa_switch *ds = dp->ds;
1246 
1247 	if (!ds->ops->get_rxnfc)
1248 		return -EOPNOTSUPP;
1249 
1250 	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
1251 }
1252 
1253 static int dsa_slave_set_rxnfc(struct net_device *dev,
1254 			       struct ethtool_rxnfc *nfc)
1255 {
1256 	struct dsa_port *dp = dsa_slave_to_port(dev);
1257 	struct dsa_switch *ds = dp->ds;
1258 
1259 	if (!ds->ops->set_rxnfc)
1260 		return -EOPNOTSUPP;
1261 
1262 	return ds->ops->set_rxnfc(ds, dp->index, nfc);
1263 }
1264 
1265 static int dsa_slave_get_ts_info(struct net_device *dev,
1266 				 struct ethtool_ts_info *ts)
1267 {
1268 	struct dsa_slave_priv *p = netdev_priv(dev);
1269 	struct dsa_switch *ds = p->dp->ds;
1270 
1271 	if (!ds->ops->get_ts_info)
1272 		return -EOPNOTSUPP;
1273 
1274 	return ds->ops->get_ts_info(ds, p->dp->index, ts);
1275 }
1276 
1277 static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
1278 				     u16 vid)
1279 {
1280 	struct net_device *master = dsa_slave_to_master(dev);
1281 	struct dsa_port *dp = dsa_slave_to_port(dev);
1282 	struct switchdev_obj_port_vlan vlan = {
1283 		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
1284 		.vid = vid,
1285 		/* This API only allows programming tagged, non-PVID VIDs */
1286 		.flags = 0,
1287 	};
1288 	int ret;
1289 
1290 	/* User port... */
1291 	ret = dsa_port_vlan_add(dp, &vlan);
1292 	if (ret)
1293 		return ret;
1294 
1295 	/* And CPU port... */
1296 	ret = dsa_port_vlan_add(dp->cpu_dp, &vlan);
1297 	if (ret)
1298 		return ret;
1299 
1300 	return vlan_vid_add(master, proto, vid);
1301 }
1302 
1303 static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
1304 				      u16 vid)
1305 {
1306 	struct net_device *master = dsa_slave_to_master(dev);
1307 	struct dsa_port *dp = dsa_slave_to_port(dev);
1308 	struct switchdev_obj_port_vlan vlan = {
1309 		.vid = vid,
1310 		/* This API only allows programming tagged, non-PVID VIDs */
1311 		.flags = 0,
1312 	};
1313 	int err;
1314 
1315 	/* Do not deprogram the CPU port as it may be shared with other user
1316 	 * ports which can be members of this VLAN as well.
1317 	 */
1318 	err = dsa_port_vlan_del(dp, &vlan);
1319 	if (err)
1320 		return err;
1321 
1322 	vlan_vid_del(master, proto, vid);
1323 
1324 	return 0;
1325 }
1326 
1327 struct dsa_hw_port {
1328 	struct list_head list;
1329 	struct net_device *dev;
1330 	int old_mtu;
1331 };
1332 
1333 static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
1334 {
1335 	const struct dsa_hw_port *p;
1336 	int err;
1337 
1338 	list_for_each_entry(p, hw_port_list, list) {
1339 		if (p->dev->mtu == mtu)
1340 			continue;
1341 
1342 		err = dev_set_mtu(p->dev, mtu);
1343 		if (err)
1344 			goto rollback;
1345 	}
1346 
1347 	return 0;
1348 
1349 rollback:
1350 	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
1351 		if (p->dev->mtu == p->old_mtu)
1352 			continue;
1353 
1354 		if (dev_set_mtu(p->dev, p->old_mtu))
1355 			netdev_err(p->dev, "Failed to restore MTU\n");
1356 	}
1357 
1358 	return err;
1359 }
1360 
1361 static void dsa_hw_port_list_free(struct list_head *hw_port_list)
1362 {
1363 	struct dsa_hw_port *p, *n;
1364 
1365 	list_for_each_entry_safe(p, n, hw_port_list, list)
1366 		kfree(p);
1367 }
1368 
1369 /* Make the hardware datapath to/from @dev limited to a common MTU */
1370 static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
1371 {
1372 	struct list_head hw_port_list;
1373 	struct dsa_switch_tree *dst;
1374 	int min_mtu = ETH_MAX_MTU;
1375 	struct dsa_port *other_dp;
1376 	int err;
1377 
1378 	if (!dp->ds->mtu_enforcement_ingress)
1379 		return;
1380 
1381 	if (!dp->bridge_dev)
1382 		return;
1383 
1384 	INIT_LIST_HEAD(&hw_port_list);
1385 
1386 	/* Populate the list of ports that are part of the same bridge
1387 	 * as the newly added/modified port
1388 	 */
1389 	list_for_each_entry(dst, &dsa_tree_list, list) {
1390 		list_for_each_entry(other_dp, &dst->ports, list) {
1391 			struct dsa_hw_port *hw_port;
1392 			struct net_device *slave;
1393 
1394 			if (other_dp->type != DSA_PORT_TYPE_USER)
1395 				continue;
1396 
1397 			if (other_dp->bridge_dev != dp->bridge_dev)
1398 				continue;
1399 
1400 			if (!other_dp->ds->mtu_enforcement_ingress)
1401 				continue;
1402 
1403 			slave = other_dp->slave;
1404 
1405 			if (min_mtu > slave->mtu)
1406 				min_mtu = slave->mtu;
1407 
1408 			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
1409 			if (!hw_port)
1410 				goto out;
1411 
1412 			hw_port->dev = slave;
1413 			hw_port->old_mtu = slave->mtu;
1414 
1415 			list_add(&hw_port->list, &hw_port_list);
1416 		}
1417 	}
1418 
1419 	/* Attempt to configure the entire hardware bridge to the newly added
1420 	 * interface's MTU first, regardless of whether the intention of the
1421 	 * user was to raise or lower it.
1422 	 */
1423 	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
1424 	if (!err)
1425 		goto out;
1426 
1427 	/* Clearly that didn't work out so well, so just set the minimum MTU on
1428 	 * all hardware bridge ports now. If this fails too, then all ports will
1429 	 * still have their old MTU rolled back anyway.
1430 	 */
1431 	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);
1432 
1433 out:
1434 	dsa_hw_port_list_free(&hw_port_list);
1435 }
1436 
1437 int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
1438 {
1439 	struct net_device *master = dsa_slave_to_master(dev);
1440 	struct dsa_port *dp = dsa_slave_to_port(dev);
1441 	struct dsa_slave_priv *p = netdev_priv(dev);
1442 	struct dsa_switch *ds = p->dp->ds;
1443 	struct dsa_port *cpu_dp;
1444 	int port = p->dp->index;
1445 	int largest_mtu = 0;
1446 	int new_master_mtu;
1447 	int old_master_mtu;
1448 	int mtu_limit;
1449 	int cpu_mtu;
1450 	int err, i;
1451 
1452 	if (!ds->ops->port_change_mtu)
1453 		return -EOPNOTSUPP;
1454 
1455 	for (i = 0; i < ds->num_ports; i++) {
1456 		int slave_mtu;
1457 
1458 		if (!dsa_is_user_port(ds, i))
1459 			continue;
1460 
1461 		/* During probe, this function will be called for each slave
1462 		 * device, while not all of them have been allocated. That's
1463 		 * ok, it doesn't change what the maximum is, so ignore it.
1464 		 */
1465 		if (!dsa_to_port(ds, i)->slave)
1466 			continue;
1467 
1468 		/* Pretend that we already applied the setting, which we
1469 		 * actually haven't (still haven't done all integrity checks)
1470 		 */
1471 		if (i == port)
1472 			slave_mtu = new_mtu;
1473 		else
1474 			slave_mtu = dsa_to_port(ds, i)->slave->mtu;
1475 
1476 		if (largest_mtu < slave_mtu)
1477 			largest_mtu = slave_mtu;
1478 	}
1479 
1480 	cpu_dp = dsa_to_port(ds, port)->cpu_dp;
1481 
1482 	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
1483 	old_master_mtu = master->mtu;
1484 	new_master_mtu = largest_mtu + cpu_dp->tag_ops->overhead;
1485 	if (new_master_mtu > mtu_limit)
1486 		return -ERANGE;
1487 
1488 	/* If the master MTU isn't over limit, there's no need to check the CPU
1489 	 * MTU, since that surely isn't either.
1490 	 */
1491 	cpu_mtu = largest_mtu;
1492 
1493 	/* Start applying stuff */
1494 	if (new_master_mtu != old_master_mtu) {
1495 		err = dev_set_mtu(master, new_master_mtu);
1496 		if (err < 0)
1497 			goto out_master_failed;
1498 
1499 		/* We only need to propagate the MTU of the CPU port to
1500 		 * upstream switches.
1501 		 */
1502 		err = dsa_port_mtu_change(cpu_dp, cpu_mtu, true);
1503 		if (err)
1504 			goto out_cpu_failed;
1505 	}
1506 
1507 	err = dsa_port_mtu_change(dp, new_mtu, false);
1508 	if (err)
1509 		goto out_port_failed;
1510 
1511 	dev->mtu = new_mtu;
1512 
1513 	dsa_bridge_mtu_normalization(dp);
1514 
1515 	return 0;
1516 
1517 out_port_failed:
1518 	if (new_master_mtu != old_master_mtu)
1519 		dsa_port_mtu_change(cpu_dp, old_master_mtu -
1520 				    cpu_dp->tag_ops->overhead,
1521 				    true);
1522 out_cpu_failed:
1523 	if (new_master_mtu != old_master_mtu)
1524 		dev_set_mtu(master, old_master_mtu);
1525 out_master_failed:
1526 	return err;
1527 }
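
/* Worked example: with user ports at MTU 1500 and a tagging protocol
 * with 8 bytes of overhead, largest_mtu = 1500, so the master must
 * accept new_master_mtu = 1500 + 8 = 1508 for tagged frames to pass
 * uncut. Raising one port to 9000 would likewise require
 * master->max_mtu >= 9008.
 */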
1528 
1529 static const struct ethtool_ops dsa_slave_ethtool_ops = {
1530 	.get_drvinfo		= dsa_slave_get_drvinfo,
1531 	.get_regs_len		= dsa_slave_get_regs_len,
1532 	.get_regs		= dsa_slave_get_regs,
1533 	.nway_reset		= dsa_slave_nway_reset,
1534 	.get_link		= ethtool_op_get_link,
1535 	.get_eeprom_len		= dsa_slave_get_eeprom_len,
1536 	.get_eeprom		= dsa_slave_get_eeprom,
1537 	.set_eeprom		= dsa_slave_set_eeprom,
1538 	.get_strings		= dsa_slave_get_strings,
1539 	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
1540 	.get_sset_count		= dsa_slave_get_sset_count,
1541 	.set_wol		= dsa_slave_set_wol,
1542 	.get_wol		= dsa_slave_get_wol,
1543 	.set_eee		= dsa_slave_set_eee,
1544 	.get_eee		= dsa_slave_get_eee,
1545 	.get_link_ksettings	= dsa_slave_get_link_ksettings,
1546 	.set_link_ksettings	= dsa_slave_set_link_ksettings,
1547 	.get_pauseparam		= dsa_slave_get_pauseparam,
1548 	.set_pauseparam		= dsa_slave_set_pauseparam,
1549 	.get_rxnfc		= dsa_slave_get_rxnfc,
1550 	.set_rxnfc		= dsa_slave_set_rxnfc,
1551 	.get_ts_info		= dsa_slave_get_ts_info,
1552 };
1553 
1554 /* legacy way, bypassing the bridge *****************************************/
1555 static int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
1556 			      struct net_device *dev,
1557 			      const unsigned char *addr, u16 vid,
1558 			      u16 flags,
1559 			      struct netlink_ext_ack *extack)
1560 {
1561 	struct dsa_port *dp = dsa_slave_to_port(dev);
1562 
1563 	return dsa_port_fdb_add(dp, addr, vid);
1564 }
1565 
1566 static int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
1567 			      struct net_device *dev,
1568 			      const unsigned char *addr, u16 vid)
1569 {
1570 	struct dsa_port *dp = dsa_slave_to_port(dev);
1571 
1572 	return dsa_port_fdb_del(dp, addr, vid);
1573 }
1574 
1575 static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
1576 {
1577 	struct dsa_port *dp = dsa_slave_to_port(dev);
1578 
1579 	return dp->ds->devlink ? &dp->devlink_port : NULL;
1580 }
1581 
1582 static void dsa_slave_get_stats64(struct net_device *dev,
1583 				  struct rtnl_link_stats64 *s)
1584 {
1585 	struct dsa_port *dp = dsa_slave_to_port(dev);
1586 	struct dsa_switch *ds = dp->ds;
1587 
1588 	if (ds->ops->get_stats64)
1589 		ds->ops->get_stats64(ds, dp->index, s);
1590 	else
1591 		dev_get_tstats64(dev, s);
1592 }
1593 
1594 static const struct net_device_ops dsa_slave_netdev_ops = {
1595 	.ndo_open	 	= dsa_slave_open,
1596 	.ndo_stop		= dsa_slave_close,
1597 	.ndo_start_xmit		= dsa_slave_xmit,
1598 	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
1599 	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
1600 	.ndo_set_mac_address	= dsa_slave_set_mac_address,
1601 	.ndo_fdb_add		= dsa_legacy_fdb_add,
1602 	.ndo_fdb_del		= dsa_legacy_fdb_del,
1603 	.ndo_fdb_dump		= dsa_slave_fdb_dump,
1604 	.ndo_do_ioctl		= dsa_slave_ioctl,
1605 	.ndo_get_iflink		= dsa_slave_get_iflink,
1606 #ifdef CONFIG_NET_POLL_CONTROLLER
1607 	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
1608 	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
1609 	.ndo_poll_controller	= dsa_slave_poll_controller,
1610 #endif
1611 	.ndo_get_phys_port_name	= dsa_slave_get_phys_port_name,
1612 	.ndo_setup_tc		= dsa_slave_setup_tc,
1613 	.ndo_get_stats64	= dsa_slave_get_stats64,
1614 	.ndo_get_port_parent_id	= dsa_slave_get_port_parent_id,
1615 	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
1616 	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
1617 	.ndo_get_devlink_port	= dsa_slave_get_devlink_port,
1618 	.ndo_change_mtu		= dsa_slave_change_mtu,
1619 };
1620 
1621 static struct device_type dsa_type = {
1622 	.name	= "dsa",
1623 };
1624 
1625 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
1626 {
1627 	const struct dsa_port *dp = dsa_to_port(ds, port);
1628 
1629 	if (dp->pl)
1630 		phylink_mac_change(dp->pl, up);
1631 }
1632 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
1633 
1634 static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
1635 					  struct phylink_link_state *state)
1636 {
1637 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1638 	struct dsa_switch *ds = dp->ds;
1639 
1640 	/* No need to check that this operation is valid; the callback would
1641 	 * not be called if it were not.
1642 	 */
1643 	ds->ops->phylink_fixed_state(ds, dp->index, state);
1644 }
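
/* Example (hedged sketch): a driver whose fixed link runs at gigabit
 * could implement the callback along these lines:
 *
 *	static void foo_phylink_fixed_state(struct dsa_switch *ds, int port,
 *					    struct phylink_link_state *state)
 *	{
 *		state->link = 1;
 *		state->speed = SPEED_1000;
 *		state->duplex = DUPLEX_FULL;
 *	}
 */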
1645 
1646 /* slave device setup *******************************************************/
1647 static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
1648 {
1649 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1650 	struct dsa_switch *ds = dp->ds;
1651 
1652 	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
1653 	if (!slave_dev->phydev) {
1654 		netdev_err(slave_dev, "no phy at %d\n", addr);
1655 		return -ENODEV;
1656 	}
1657 
1658 	return phylink_connect_phy(dp->pl, slave_dev->phydev);
1659 }
1660 
1661 static int dsa_slave_phy_setup(struct net_device *slave_dev)
1662 {
1663 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1664 	struct device_node *port_dn = dp->dn;
1665 	struct dsa_switch *ds = dp->ds;
1666 	phy_interface_t mode;
1667 	u32 phy_flags = 0;
1668 	int ret;
1669 
1670 	ret = of_get_phy_mode(port_dn, &mode);
1671 	if (ret)
1672 		mode = PHY_INTERFACE_MODE_NA;
1673 
1674 	dp->pl_config.dev = &slave_dev->dev;
1675 	dp->pl_config.type = PHYLINK_NETDEV;
1676 
1677 	/* The get_fixed_state callback takes precedence over polling the
1678 	 * link GPIO in PHYLINK (see phylink_get_fixed_state).  Only set
1679 	 * this if the switch provides such a callback.
1680 	 */
1681 	if (ds->ops->phylink_fixed_state) {
1682 		dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
1683 		dp->pl_config.poll_fixed_state = true;
1684 	}
1685 
1686 	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
1687 				&dsa_port_phylink_mac_ops);
1688 	if (IS_ERR(dp->pl)) {
1689 		netdev_err(slave_dev,
1690 			   "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
1691 		return PTR_ERR(dp->pl);
1692 	}
1693 
1694 	if (ds->ops->get_phy_flags)
1695 		phy_flags = ds->ops->get_phy_flags(ds, dp->index);
1696 
1697 	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
1698 	if (ret == -ENODEV && ds->slave_mii_bus) {
1699 		/* We could not connect to a designated PHY or SFP, so try to
1700 		 * use the switch internal MDIO bus instead
1701 		 */
1702 		ret = dsa_slave_phy_connect(slave_dev, dp->index);
1703 		if (ret) {
1704 			netdev_err(slave_dev,
1705 				   "failed to connect to port %d: %d\n",
1706 				   dp->index, ret);
1707 			phylink_destroy(dp->pl);
1708 			return ret;
1709 		}
1710 	}
1711 
1712 	return ret;
1713 }
1714 
1715 void dsa_slave_setup_tagger(struct net_device *slave)
1716 {
1717 	struct dsa_port *dp = dsa_slave_to_port(slave);
1718 	struct dsa_slave_priv *p = netdev_priv(slave);
1719 	const struct dsa_port *cpu_dp = dp->cpu_dp;
1720 	struct net_device *master = cpu_dp->master;
1721 
1722 	if (cpu_dp->tag_ops->tail_tag)
1723 		slave->needed_tailroom = cpu_dp->tag_ops->overhead;
1724 	else
1725 		slave->needed_headroom = cpu_dp->tag_ops->overhead;
1726 	/* Try to save one extra realloc later in the TX path (in the master)
1727 	 * by also inheriting the master's needed headroom and tailroom.
1728 	 * The 8021q driver also does this.
1729 	 */
1730 	slave->needed_headroom += master->needed_headroom;
1731 	slave->needed_tailroom += master->needed_tailroom;
1732 
1733 	p->xmit = cpu_dp->tag_ops->xmit;
1734 }
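
/* Example: a head tagger with 8 bytes of overhead on top of a master
 * that itself advertises 2 bytes of needed_headroom yields
 * slave->needed_headroom = 8 + 2 = 10, so a single reallocation in
 * dsa_realloc_skb() satisfies both layers (numbers are illustrative).
 */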
1735 
1736 static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
1737 static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
1738 					    struct netdev_queue *txq,
1739 					    void *_unused)
1740 {
1741 	lockdep_set_class(&txq->_xmit_lock,
1742 			  &dsa_slave_netdev_xmit_lock_key);
1743 }
1744 
1745 int dsa_slave_suspend(struct net_device *slave_dev)
1746 {
1747 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1748 
1749 	if (!netif_running(slave_dev))
1750 		return 0;
1751 
1752 	netif_device_detach(slave_dev);
1753 
1754 	rtnl_lock();
1755 	phylink_stop(dp->pl);
1756 	rtnl_unlock();
1757 
1758 	return 0;
1759 }
1760 
1761 int dsa_slave_resume(struct net_device *slave_dev)
1762 {
1763 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1764 
1765 	if (!netif_running(slave_dev))
1766 		return 0;
1767 
1768 	netif_device_attach(slave_dev);
1769 
1770 	rtnl_lock();
1771 	phylink_start(dp->pl);
1772 	rtnl_unlock();
1773 
1774 	return 0;
1775 }
1776 
1777 int dsa_slave_create(struct dsa_port *port)
1778 {
1779 	const struct dsa_port *cpu_dp = port->cpu_dp;
1780 	struct net_device *master = cpu_dp->master;
1781 	struct dsa_switch *ds = port->ds;
1782 	const char *name = port->name;
1783 	struct net_device *slave_dev;
1784 	struct dsa_slave_priv *p;
1785 	int ret;
1786 
1787 	if (!ds->num_tx_queues)
1788 		ds->num_tx_queues = 1;
1789 
1790 	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
1791 				     NET_NAME_UNKNOWN, ether_setup,
1792 				     ds->num_tx_queues, 1);
1793 	if (slave_dev == NULL)
1794 		return -ENOMEM;
1795 
1796 	slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
1797 	if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
1798 		slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1799 	slave_dev->hw_features |= NETIF_F_HW_TC;
1800 	slave_dev->features |= NETIF_F_LLTX;
1801 	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
1802 	if (!IS_ERR_OR_NULL(port->mac))
1803 		ether_addr_copy(slave_dev->dev_addr, port->mac);
1804 	else
1805 		eth_hw_addr_inherit(slave_dev, master);
1806 	slave_dev->priv_flags |= IFF_NO_QUEUE;
1807 	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
1808 	if (ds->ops->port_max_mtu)
1809 		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
1810 	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
1811 
1812 	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
1813 				 NULL);
1814 
1815 	SET_NETDEV_DEV(slave_dev, port->ds->dev);
1816 	slave_dev->dev.of_node = port->dn;
1817 	slave_dev->vlan_features = master->vlan_features;
1818 
1819 	p = netdev_priv(slave_dev);
1820 	slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1821 	if (!slave_dev->tstats) {
1822 		free_netdev(slave_dev);
1823 		return -ENOMEM;
1824 	}
1825 
1826 	ret = gro_cells_init(&p->gcells, slave_dev);
1827 	if (ret)
1828 		goto out_free;
1829 
1830 	p->dp = port;
1831 	INIT_LIST_HEAD(&p->mall_tc_list);
1832 	port->slave = slave_dev;
1833 	dsa_slave_setup_tagger(slave_dev);
1834 
1835 	rtnl_lock();
1836 	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
1837 	rtnl_unlock();
1838 	if (ret && ret != -EOPNOTSUPP)
1839 		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
1840 			 ret, ETH_DATA_LEN, port->index);
1841 
1842 	netif_carrier_off(slave_dev);
1843 
1844 	ret = dsa_slave_phy_setup(slave_dev);
1845 	if (ret) {
1846 		netdev_err(slave_dev,
1847 			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
1848 			   ret, ds->dst->index, ds->index, port->index);
1849 		goto out_gcells;
1850 	}
1851 
1852 	rtnl_lock();
1853 
1854 	ret = register_netdevice(slave_dev);
1855 	if (ret) {
1856 		netdev_err(master, "error %d registering interface %s\n",
1857 			   ret, slave_dev->name);
1858 		rtnl_unlock();
1859 		goto out_phy;
1860 	}
1861 
1862 	ret = netdev_upper_dev_link(master, slave_dev, NULL);
1863 
1864 	rtnl_unlock();
1865 
1866 	if (ret)
1867 		goto out_unregister;
1868 
1869 	return 0;
1870 
1871 out_unregister:
1872 	unregister_netdev(slave_dev);
1873 out_phy:
1874 	rtnl_lock();
1875 	phylink_disconnect_phy(p->dp->pl);
1876 	rtnl_unlock();
1877 	phylink_destroy(p->dp->pl);
1878 out_gcells:
1879 	gro_cells_destroy(&p->gcells);
1880 out_free:
1881 	free_percpu(slave_dev->tstats);
1882 	free_netdev(slave_dev);
1883 	port->slave = NULL;
1884 	return ret;
1885 }
1886 
1887 void dsa_slave_destroy(struct net_device *slave_dev)
1888 {
1889 	struct net_device *master = dsa_slave_to_master(slave_dev);
1890 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1891 	struct dsa_slave_priv *p = netdev_priv(slave_dev);
1892 
1893 	netif_carrier_off(slave_dev);
1894 	rtnl_lock();
1895 	netdev_upper_dev_unlink(master, slave_dev);
1896 	unregister_netdevice(slave_dev);
1897 	phylink_disconnect_phy(dp->pl);
1898 	rtnl_unlock();
1899 
1900 	phylink_destroy(dp->pl);
1901 	gro_cells_destroy(&p->gcells);
1902 	free_percpu(slave_dev->tstats);
1903 	free_netdev(slave_dev);
1904 }
1905 
1906 bool dsa_slave_dev_check(const struct net_device *dev)
1907 {
1908 	return dev->netdev_ops == &dsa_slave_netdev_ops;
1909 }
1910 EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
1911 
1912 static int dsa_slave_changeupper(struct net_device *dev,
1913 				 struct netdev_notifier_changeupper_info *info)
1914 {
1915 	struct dsa_port *dp = dsa_slave_to_port(dev);
1916 	int err = NOTIFY_DONE;
1917 
1918 	if (netif_is_bridge_master(info->upper_dev)) {
1919 		if (info->linking) {
1920 			err = dsa_port_bridge_join(dp, info->upper_dev);
1921 			if (!err)
1922 				dsa_bridge_mtu_normalization(dp);
1923 			err = notifier_from_errno(err);
1924 		} else {
1925 			dsa_port_bridge_leave(dp, info->upper_dev);
1926 			err = NOTIFY_OK;
1927 		}
1928 	} else if (netif_is_lag_master(info->upper_dev)) {
1929 		if (info->linking) {
1930 			err = dsa_port_lag_join(dp, info->upper_dev,
1931 						info->upper_info);
1932 			if (err == -EOPNOTSUPP) {
1933 				NL_SET_ERR_MSG_MOD(info->info.extack,
1934 						   "Offloading not supported");
1935 				err = 0;
1936 			}
1937 			err = notifier_from_errno(err);
1938 		} else {
1939 			dsa_port_lag_leave(dp, info->upper_dev);
1940 			err = NOTIFY_OK;
1941 		}
1942 	} else if (is_hsr_master(info->upper_dev)) {
1943 		if (info->linking) {
1944 			err = dsa_port_hsr_join(dp, info->upper_dev);
1945 			if (err == -EOPNOTSUPP) {
1946 				NL_SET_ERR_MSG_MOD(info->info.extack,
1947 						   "Offloading not supported");
1948 				err = 0;
1949 			}
1950 			err = notifier_from_errno(err);
1951 		} else {
1952 			dsa_port_hsr_leave(dp, info->upper_dev);
1953 			err = NOTIFY_OK;
1954 		}
1955 	}
1956 
1957 	return err;
1958 }
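
/* For context, a minimal sketch of the ds->ops->port_bridge_join()
 * hook that dsa_port_bridge_join() above eventually reaches on
 * offload-capable switches; foo_set_forwarding_domain() and the
 * foo_* driver are assumptions, not an in-tree implementation.
 */
static int foo_port_bridge_join(struct dsa_switch *ds, int port,
				struct net_device *br)
{
	/* Let @port forward autonomously to ports already in @br */
	return foo_set_forwarding_domain(ds->priv, port, br);
}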
1959 
1960 static int
1961 dsa_slave_lag_changeupper(struct net_device *dev,
1962 			  struct netdev_notifier_changeupper_info *info)
1963 {
1964 	struct net_device *lower;
1965 	struct list_head *iter;
1966 	int err = NOTIFY_DONE;
1967 	struct dsa_port *dp;
1968 
1969 	netdev_for_each_lower_dev(dev, lower, iter) {
1970 		if (!dsa_slave_dev_check(lower))
1971 			continue;
1972 
1973 		dp = dsa_slave_to_port(lower);
1974 		if (!dp->lag_dev)
1975 			/* Software LAG - not offloaded, nothing to do */
1976 			continue;
1977 
1978 		err = dsa_slave_changeupper(lower, info);
1979 		if (notifier_to_errno(err))
1980 			break;
1981 	}
1982 
1983 	return err;
1984 }
1985 
1986 static int
1987 dsa_prevent_bridging_8021q_upper(struct net_device *dev,
1988 				 struct netdev_notifier_changeupper_info *info)
1989 {
1990 	struct netlink_ext_ack *ext_ack;
1991 	struct net_device *slave;
1992 	struct dsa_port *dp;
1993 
1994 	ext_ack = netdev_notifier_info_to_extack(&info->info);
1995 
1996 	if (!is_vlan_dev(dev))
1997 		return NOTIFY_DONE;
1998 
1999 	slave = vlan_dev_real_dev(dev);
2000 	if (!dsa_slave_dev_check(slave))
2001 		return NOTIFY_DONE;
2002 
2003 	dp = dsa_slave_to_port(slave);
2004 	if (!dp->bridge_dev)
2005 		return NOTIFY_DONE;
2006 
2007 	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
2008 	if (br_vlan_enabled(dp->bridge_dev) &&
2009 	    netif_is_bridge_master(info->upper_dev) && info->linking) {
2010 		NL_SET_ERR_MSG_MOD(ext_ack,
2011 				   "Cannot enslave VLAN device into VLAN aware bridge");
2012 		return notifier_from_errno(-EINVAL);
2013 	}
2014 
2015 	return NOTIFY_DONE;
2016 }
2017 
2018 static int
2019 dsa_slave_check_8021q_upper(struct net_device *dev,
2020 			    struct netdev_notifier_changeupper_info *info)
2021 {
2022 	struct dsa_port *dp = dsa_slave_to_port(dev);
2023 	struct net_device *br = dp->bridge_dev;
2024 	struct bridge_vlan_info br_info;
2025 	struct netlink_ext_ack *extack;
2026 	int err = NOTIFY_DONE;
2027 	u16 vid;
2028 
2029 	if (!br || !br_vlan_enabled(br))
2030 		return NOTIFY_DONE;
2031 
2032 	extack = netdev_notifier_info_to_extack(&info->info);
2033 	vid = vlan_dev_vlan_id(info->upper_dev);
2034 
2035 	/* br_vlan_get_info() returns -EINVAL or -ENOENT if the device
2036 	 * or the VID, respectively, is not found. A return value of 0
2037 	 * means the VID exists on the bridge, a failure for us here.
2038 	 */
2039 	err = br_vlan_get_info(br, vid, &br_info);
2040 	if (err == 0) {
2041 		NL_SET_ERR_MSG_MOD(extack,
2042 				   "This VLAN is already configured by the bridge");
2043 		return notifier_from_errno(-EBUSY);
2044 	}
2045 
2046 	return NOTIFY_DONE;
2047 }
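
/* Usage sketch of the br_vlan_get_info() contract relied upon above;
 * foo_vid_is_on_bridge() is a hypothetical helper for illustration.
 */
static bool foo_vid_is_on_bridge(struct net_device *br, u16 vid)
{
	struct bridge_vlan_info vinfo;

	/* 0 means the VID exists; -EINVAL/-ENOENT mean it does not */
	return br_vlan_get_info(br, vid, &vinfo) == 0;
}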
2048 
2049 static int dsa_slave_netdevice_event(struct notifier_block *nb,
2050 				     unsigned long event, void *ptr)
2051 {
2052 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2053 
2054 	switch (event) {
2055 	case NETDEV_PRECHANGEUPPER: {
2056 		struct netdev_notifier_changeupper_info *info = ptr;
2057 		struct dsa_switch *ds;
2058 		struct dsa_port *dp;
2059 		int err;
2060 
2061 		if (!dsa_slave_dev_check(dev))
2062 			return dsa_prevent_bridging_8021q_upper(dev, ptr);
2063 
2064 		dp = dsa_slave_to_port(dev);
2065 		ds = dp->ds;
2066 
2067 		if (ds->ops->port_prechangeupper) {
2068 			err = ds->ops->port_prechangeupper(ds, dp->index, info);
2069 			if (err)
2070 				return notifier_from_errno(err);
2071 		}
2072 
2073 		if (is_vlan_dev(info->upper_dev))
2074 			return dsa_slave_check_8021q_upper(dev, ptr);
2075 		break;
2076 	}
2077 	case NETDEV_CHANGEUPPER:
2078 		if (dsa_slave_dev_check(dev))
2079 			return dsa_slave_changeupper(dev, ptr);
2080 
2081 		if (netif_is_lag_master(dev))
2082 			return dsa_slave_lag_changeupper(dev, ptr);
2083 
2084 		break;
2085 	case NETDEV_CHANGELOWERSTATE: {
2086 		struct netdev_notifier_changelowerstate_info *info = ptr;
2087 		struct dsa_port *dp;
2088 		int err;
2089 
2090 		if (!dsa_slave_dev_check(dev))
2091 			break;
2092 
2093 		dp = dsa_slave_to_port(dev);
2094 
2095 		err = dsa_port_lag_change(dp, info->lower_state_info);
2096 		return notifier_from_errno(err);
2097 	}
2098 	case NETDEV_GOING_DOWN: {
2099 		struct dsa_port *dp, *cpu_dp;
2100 		struct dsa_switch_tree *dst;
2101 		LIST_HEAD(close_list);
2102 
2103 		if (!netdev_uses_dsa(dev))
2104 			return NOTIFY_DONE;
2105 
2106 		cpu_dp = dev->dsa_ptr;
2107 		dst = cpu_dp->ds->dst;
2108 
2109 		list_for_each_entry(dp, &dst->ports, list) {
2110 			if (!dsa_is_user_port(dp->ds, dp->index))
2111 				continue;
2112 
2113 			list_add(&dp->slave->close_list, &close_list);
2114 		}
2115 
2116 		dev_close_many(&close_list, true);
2117 
2118 		return NOTIFY_OK;
2119 	}
2120 	default:
2121 		break;
2122 	}
2123 
2124 	return NOTIFY_DONE;
2125 }
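
/* A minimal sketch of the optional ds->ops->port_prechangeupper()
 * hook called from NETDEV_PRECHANGEUPPER above. This hypothetical
 * driver vetoes bridge uppers on ports it cannot offload; the
 * foo_port_can_bridge() helper is an assumption.
 */
static int foo_port_prechangeupper(struct dsa_switch *ds, int port,
				   struct netdev_notifier_changeupper_info *info)
{
	if (netif_is_bridge_master(info->upper_dev) && info->linking &&
	    !foo_port_can_bridge(ds->priv, port)) {
		NL_SET_ERR_MSG_MOD(info->info.extack,
				   "Bridging not supported on this port");
		return -EOPNOTSUPP;
	}

	return 0;
}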
2126 
2127 static void
2128 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
2129 {
2130 	struct dsa_switch *ds = switchdev_work->ds;
2131 	struct switchdev_notifier_fdb_info info;
2132 	struct dsa_port *dp;
2133 
2134 	if (!dsa_is_user_port(ds, switchdev_work->port))
2135 		return;
2136 
2137 	info.addr = switchdev_work->addr;
2138 	info.vid = switchdev_work->vid;
2139 	info.offloaded = true;
2140 	dp = dsa_to_port(ds, switchdev_work->port);
2141 	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
2142 				 dp->slave, &info.info, NULL);
2143 }
2144 
2145 static void dsa_slave_switchdev_event_work(struct work_struct *work)
2146 {
2147 	struct dsa_switchdev_event_work *switchdev_work =
2148 		container_of(work, struct dsa_switchdev_event_work, work);
2149 	struct dsa_switch *ds = switchdev_work->ds;
2150 	struct dsa_port *dp;
2151 	int err;
2152 
2153 	dp = dsa_to_port(ds, switchdev_work->port);
2154 
2155 	rtnl_lock();
2156 	switch (switchdev_work->event) {
2157 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2158 		err = dsa_port_fdb_add(dp, switchdev_work->addr,
2159 				       switchdev_work->vid);
2160 		if (err) {
2161 			dev_err(ds->dev,
2162 				"port %d failed to add %pM vid %d to fdb: %d\n",
2163 				dp->index, switchdev_work->addr,
2164 				switchdev_work->vid, err);
2165 			break;
2166 		}
2167 		dsa_fdb_offload_notify(switchdev_work);
2168 		break;
2169 
2170 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2171 		err = dsa_port_fdb_del(dp, switchdev_work->addr,
2172 				       switchdev_work->vid);
2173 		if (err) {
2174 			dev_err(ds->dev,
2175 				"port %d failed to delete %pM vid %d from fdb: %d\n",
2176 				dp->index, switchdev_work->addr,
2177 				switchdev_work->vid, err);
2178 		}
2179 
2180 		break;
2181 	}
2182 	rtnl_unlock();
2183 
2184 	kfree(switchdev_work);
2185 	if (dsa_is_user_port(ds, dp->index))
2186 		dev_put(dp->slave);
2187 }
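
/* For reference, a sketch of the ds->ops->port_fdb_add() hook driven
 * by the deferred work above; it runs under rtnl_lock() and may
 * sleep. foo_write_atu_entry() and struct foo_priv are assumptions.
 */
static int foo_port_fdb_add(struct dsa_switch *ds, int port,
			    const unsigned char *addr, u16 vid)
{
	struct foo_priv *priv = ds->priv;

	/* Program one static entry into the address translation unit */
	return foo_write_atu_entry(priv, port, addr, vid);
}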
2188 
2189 static int dsa_lower_dev_walk(struct net_device *lower_dev,
2190 			      struct netdev_nested_priv *priv)
2191 {
2192 	if (dsa_slave_dev_check(lower_dev)) {
2193 		priv->data = (void *)netdev_priv(lower_dev);
2194 		return 1;
2195 	}
2196 
2197 	return 0;
2198 }
2199 
2200 static struct dsa_slave_priv *dsa_slave_dev_lower_find(struct net_device *dev)
2201 {
2202 	struct netdev_nested_priv priv = {
2203 		.data = NULL,
2204 	};
2205 
2206 	netdev_walk_all_lower_dev_rcu(dev, dsa_lower_dev_walk, &priv);
2207 
2208 	return (struct dsa_slave_priv *)priv.data;
2209 }
2210 
2211 /* Called under rcu_read_lock() */
2212 static int dsa_slave_switchdev_event(struct notifier_block *unused,
2213 				     unsigned long event, void *ptr)
2214 {
2215 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2216 	const struct switchdev_notifier_fdb_info *fdb_info;
2217 	struct dsa_switchdev_event_work *switchdev_work;
2218 	struct dsa_port *dp;
2219 	int err;
2220 
2221 	switch (event) {
2222 	case SWITCHDEV_PORT_ATTR_SET:
2223 		err = switchdev_handle_port_attr_set(dev, ptr,
2224 						     dsa_slave_dev_check,
2225 						     dsa_slave_port_attr_set);
2226 		return notifier_from_errno(err);
2227 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2228 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2229 		fdb_info = ptr;
2230 
2231 		if (dsa_slave_dev_check(dev)) {
2232 			if (!fdb_info->added_by_user)
2233 				return NOTIFY_OK;
2234 
2235 			dp = dsa_slave_to_port(dev);
2236 		} else {
2237 			/* Snoop addresses learnt on foreign interfaces
2238 			 * bridged with us, for switches that don't
2239 			 * automatically learn SA from CPU-injected traffic
2240 			 */
2241 			struct net_device *br_dev;
2242 			struct dsa_slave_priv *p;
2243 
2244 			br_dev = netdev_master_upper_dev_get_rcu(dev);
2245 			if (!br_dev)
2246 				return NOTIFY_DONE;
2247 
2248 			if (!netif_is_bridge_master(br_dev))
2249 				return NOTIFY_DONE;
2250 
2251 			p = dsa_slave_dev_lower_find(br_dev);
2252 			if (!p)
2253 				return NOTIFY_DONE;
2254 
2255 			dp = p->dp->cpu_dp;
2256 
2257 			if (!dp->ds->assisted_learning_on_cpu_port)
2258 				return NOTIFY_DONE;
2259 
2260 			/* When the bridge learns an address on an offloaded
2261 			 * LAG we don't want to send traffic to the CPU, the
2262 			 * other ports bridged with the LAG should be able to
2263 			 * autonomously forward towards it.
2264 			 */
2265 			if (dsa_tree_offloads_netdev(dp->ds->dst, dev))
2266 				return NOTIFY_DONE;
2267 		}
2268 
2269 		if (!dp->ds->ops->port_fdb_add || !dp->ds->ops->port_fdb_del)
2270 			return NOTIFY_DONE;
2271 
2272 		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
2273 		if (!switchdev_work)
2274 			return NOTIFY_BAD;
2275 
2276 		INIT_WORK(&switchdev_work->work,
2277 			  dsa_slave_switchdev_event_work);
2278 		switchdev_work->ds = dp->ds;
2279 		switchdev_work->port = dp->index;
2280 		switchdev_work->event = event;
2281 
2282 		ether_addr_copy(switchdev_work->addr,
2283 				fdb_info->addr);
2284 		switchdev_work->vid = fdb_info->vid;
2285 
2286 		/* Hold a reference on the slave for dsa_fdb_offload_notify */
2287 		if (dsa_is_user_port(dp->ds, dp->index))
2288 			dev_hold(dev);
2289 		dsa_schedule_work(&switchdev_work->work);
2290 		break;
2291 	default:
2292 		return NOTIFY_DONE;
2293 	}
2294 
2295 	return NOTIFY_OK;
2296 }
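
/* The foreign-interface snooping above is gated on
 * ds->assisted_learning_on_cpu_port. A driver whose hardware does not
 * learn from CPU-injected frames would opt in from its .setup()
 * callback; foo_setup() is a hypothetical example.
 */
static int foo_setup(struct dsa_switch *ds)
{
	/* Ask the DSA core to reflect bridge FDB entries learnt on
	 * foreign interfaces onto the CPU port.
	 */
	ds->assisted_learning_on_cpu_port = true;

	return 0;
}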
2297 
2298 static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
2299 					      unsigned long event, void *ptr)
2300 {
2301 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2302 	int err;
2303 
2304 	switch (event) {
2305 	case SWITCHDEV_PORT_OBJ_ADD:
2306 		err = switchdev_handle_port_obj_add(dev, ptr,
2307 						    dsa_slave_dev_check,
2308 						    dsa_slave_port_obj_add);
2309 		return notifier_from_errno(err);
2310 	case SWITCHDEV_PORT_OBJ_DEL:
2311 		err = switchdev_handle_port_obj_del(dev, ptr,
2312 						    dsa_slave_dev_check,
2313 						    dsa_slave_port_obj_del);
2314 		return notifier_from_errno(err);
2315 	case SWITCHDEV_PORT_ATTR_SET:
2316 		err = switchdev_handle_port_attr_set(dev, ptr,
2317 						     dsa_slave_dev_check,
2318 						     dsa_slave_port_attr_set);
2319 		return notifier_from_errno(err);
2320 	}
2321 
2322 	return NOTIFY_DONE;
2323 }
2324 
2325 static struct notifier_block dsa_slave_nb __read_mostly = {
2326 	.notifier_call  = dsa_slave_netdevice_event,
2327 };
2328 
2329 static struct notifier_block dsa_slave_switchdev_notifier = {
2330 	.notifier_call = dsa_slave_switchdev_event,
2331 };
2332 
2333 static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
2334 	.notifier_call = dsa_slave_switchdev_blocking_event,
2335 };
2336 
2337 int dsa_slave_register_notifier(void)
2338 {
2339 	struct notifier_block *nb;
2340 	int err;
2341 
2342 	err = register_netdevice_notifier(&dsa_slave_nb);
2343 	if (err)
2344 		return err;
2345 
2346 	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
2347 	if (err)
2348 		goto err_switchdev_nb;
2349 
2350 	nb = &dsa_slave_switchdev_blocking_notifier;
2351 	err = register_switchdev_blocking_notifier(nb);
2352 	if (err)
2353 		goto err_switchdev_blocking_nb;
2354 
2355 	return 0;
2356 
2357 err_switchdev_blocking_nb:
2358 	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
2359 err_switchdev_nb:
2360 	unregister_netdevice_notifier(&dsa_slave_nb);
2361 	return err;
2362 }
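
/* These helpers are meant to be called once for the whole subsystem,
 * not per switch - presumably from the DSA core's module init/exit
 * path. A hypothetical caller pairs them in the usual way:
 */
static int __init foo_core_init(void)
{
	return dsa_slave_register_notifier();
}

static void __exit foo_core_exit(void)
{
	dsa_slave_unregister_notifier();
}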
2363 
2364 void dsa_slave_unregister_notifier(void)
2365 {
2366 	struct notifier_block *nb;
2367 	int err;
2368 
2369 	nb = &dsa_slave_switchdev_blocking_notifier;
2370 	err = unregister_switchdev_blocking_notifier(nb);
2371 	if (err)
2372 		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
2373 
2374 	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
2375 	if (err)
2376 		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
2377 
2378 	err = unregister_netdevice_notifier(&dsa_slave_nb);
2379 	if (err)
2380 		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
2381 }
2382