xref: /openbmc/linux/net/dsa/slave.c (revision 66dabbb6)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/dsa/slave.c - Slave device handling
4  * Copyright (c) 2008-2009 Marvell Semiconductor
5  */
6 
7 #include <linux/list.h>
8 #include <linux/etherdevice.h>
9 #include <linux/netdevice.h>
10 #include <linux/phy.h>
11 #include <linux/phy_fixed.h>
12 #include <linux/phylink.h>
13 #include <linux/of_net.h>
14 #include <linux/of_mdio.h>
15 #include <linux/mdio.h>
16 #include <net/rtnetlink.h>
17 #include <net/pkt_cls.h>
18 #include <net/selftests.h>
19 #include <net/tc_act/tc_mirred.h>
20 #include <linux/if_bridge.h>
21 #include <linux/if_hsr.h>
22 #include <net/dcbnl.h>
23 #include <linux/netpoll.h>
24 
25 #include "dsa.h"
26 #include "port.h"
27 #include "master.h"
28 #include "netlink.h"
29 #include "slave.h"
30 #include "tag.h"
31 
/* Deferred-work context for processing a switchdev notifier event outside
 * of the atomic notifier call chain.
 */
struct dsa_switchdev_event_work {
	struct net_device *dev;		/* slave netdev the event targets */
	struct net_device *orig_dev;	/* device the event was originally raised on */
	struct work_struct work;	/* work item run from the DSA workqueue */
	unsigned long event;		/* switchdev event code being deferred */
	/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
	 * SWITCHDEV_FDB_DEL_TO_DEVICE
	 */
	unsigned char addr[ETH_ALEN];	/* MAC address of the FDB entry */
	u16 vid;			/* VLAN ID of the FDB entry */
	bool host_addr;			/* entry targets the CPU (host) port */
};
44 
/* Address-programming operations deferred to the DSA workqueue while a port
 * operates in standalone (non-bridged) mode.
 */
enum dsa_standalone_event {
	DSA_UC_ADD,	/* add a unicast address to the host FDB */
	DSA_UC_DEL,	/* delete a unicast address from the host FDB */
	DSA_MC_ADD,	/* add a multicast address to the host MDB */
	DSA_MC_DEL,	/* delete a multicast address from the host MDB */
};
51 
/* Deferred-work context for one dsa_standalone_event. Allocated by
 * dsa_slave_schedule_standalone_work() and freed by the work handler.
 */
struct dsa_standalone_event_work {
	struct work_struct work;	/* work item run from the DSA workqueue */
	struct net_device *dev;		/* slave netdev the address belongs to */
	enum dsa_standalone_event event;
	unsigned char addr[ETH_ALEN];	/* MAC address to add/delete */
	u16 vid;			/* VLAN ID for the entry (0 = untagged) */
};
59 
60 static bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
61 {
62 	return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
63 	       ds->fdb_isolation && !ds->vlan_filtering_is_global &&
64 	       !ds->needs_standalone_vlan_filtering;
65 }
66 
67 static bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds)
68 {
69 	return ds->ops->port_mdb_add && ds->ops->port_mdb_del &&
70 	       ds->fdb_isolation && !ds->vlan_filtering_is_global &&
71 	       !ds->needs_standalone_vlan_filtering;
72 }
73 
74 static void dsa_slave_standalone_event_work(struct work_struct *work)
75 {
76 	struct dsa_standalone_event_work *standalone_work =
77 		container_of(work, struct dsa_standalone_event_work, work);
78 	const unsigned char *addr = standalone_work->addr;
79 	struct net_device *dev = standalone_work->dev;
80 	struct dsa_port *dp = dsa_slave_to_port(dev);
81 	struct switchdev_obj_port_mdb mdb;
82 	struct dsa_switch *ds = dp->ds;
83 	u16 vid = standalone_work->vid;
84 	int err;
85 
86 	switch (standalone_work->event) {
87 	case DSA_UC_ADD:
88 		err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
89 		if (err) {
90 			dev_err(ds->dev,
91 				"port %d failed to add %pM vid %d to fdb: %d\n",
92 				dp->index, addr, vid, err);
93 			break;
94 		}
95 		break;
96 
97 	case DSA_UC_DEL:
98 		err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
99 		if (err) {
100 			dev_err(ds->dev,
101 				"port %d failed to delete %pM vid %d from fdb: %d\n",
102 				dp->index, addr, vid, err);
103 		}
104 
105 		break;
106 	case DSA_MC_ADD:
107 		ether_addr_copy(mdb.addr, addr);
108 		mdb.vid = vid;
109 
110 		err = dsa_port_standalone_host_mdb_add(dp, &mdb);
111 		if (err) {
112 			dev_err(ds->dev,
113 				"port %d failed to add %pM vid %d to mdb: %d\n",
114 				dp->index, addr, vid, err);
115 			break;
116 		}
117 		break;
118 	case DSA_MC_DEL:
119 		ether_addr_copy(mdb.addr, addr);
120 		mdb.vid = vid;
121 
122 		err = dsa_port_standalone_host_mdb_del(dp, &mdb);
123 		if (err) {
124 			dev_err(ds->dev,
125 				"port %d failed to delete %pM vid %d from mdb: %d\n",
126 				dp->index, addr, vid, err);
127 		}
128 
129 		break;
130 	}
131 
132 	kfree(standalone_work);
133 }
134 
135 static int dsa_slave_schedule_standalone_work(struct net_device *dev,
136 					      enum dsa_standalone_event event,
137 					      const unsigned char *addr,
138 					      u16 vid)
139 {
140 	struct dsa_standalone_event_work *standalone_work;
141 
142 	standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
143 	if (!standalone_work)
144 		return -ENOMEM;
145 
146 	INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
147 	standalone_work->event = event;
148 	standalone_work->dev = dev;
149 
150 	ether_addr_copy(standalone_work->addr, addr);
151 	standalone_work->vid = vid;
152 
153 	dsa_schedule_work(&standalone_work->work);
154 
155 	return 0;
156 }
157 
158 static int dsa_slave_sync_uc(struct net_device *dev,
159 			     const unsigned char *addr)
160 {
161 	struct net_device *master = dsa_slave_to_master(dev);
162 	struct dsa_port *dp = dsa_slave_to_port(dev);
163 
164 	dev_uc_add(master, addr);
165 
166 	if (!dsa_switch_supports_uc_filtering(dp->ds))
167 		return 0;
168 
169 	return dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
170 }
171 
172 static int dsa_slave_unsync_uc(struct net_device *dev,
173 			       const unsigned char *addr)
174 {
175 	struct net_device *master = dsa_slave_to_master(dev);
176 	struct dsa_port *dp = dsa_slave_to_port(dev);
177 
178 	dev_uc_del(master, addr);
179 
180 	if (!dsa_switch_supports_uc_filtering(dp->ds))
181 		return 0;
182 
183 	return dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
184 }
185 
186 static int dsa_slave_sync_mc(struct net_device *dev,
187 			     const unsigned char *addr)
188 {
189 	struct net_device *master = dsa_slave_to_master(dev);
190 	struct dsa_port *dp = dsa_slave_to_port(dev);
191 
192 	dev_mc_add(master, addr);
193 
194 	if (!dsa_switch_supports_mc_filtering(dp->ds))
195 		return 0;
196 
197 	return dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
198 }
199 
200 static int dsa_slave_unsync_mc(struct net_device *dev,
201 			       const unsigned char *addr)
202 {
203 	struct net_device *master = dsa_slave_to_master(dev);
204 	struct dsa_port *dp = dsa_slave_to_port(dev);
205 
206 	dev_mc_del(master, addr);
207 
208 	if (!dsa_switch_supports_mc_filtering(dp->ds))
209 		return 0;
210 
211 	return dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
212 }
213 
214 void dsa_slave_sync_ha(struct net_device *dev)
215 {
216 	struct dsa_port *dp = dsa_slave_to_port(dev);
217 	struct dsa_switch *ds = dp->ds;
218 	struct netdev_hw_addr *ha;
219 
220 	netif_addr_lock_bh(dev);
221 
222 	netdev_for_each_synced_mc_addr(ha, dev)
223 		dsa_slave_sync_mc(dev, ha->addr);
224 
225 	netdev_for_each_synced_uc_addr(ha, dev)
226 		dsa_slave_sync_uc(dev, ha->addr);
227 
228 	netif_addr_unlock_bh(dev);
229 
230 	if (dsa_switch_supports_uc_filtering(ds) ||
231 	    dsa_switch_supports_mc_filtering(ds))
232 		dsa_flush_workqueue();
233 }
234 
235 void dsa_slave_unsync_ha(struct net_device *dev)
236 {
237 	struct dsa_port *dp = dsa_slave_to_port(dev);
238 	struct dsa_switch *ds = dp->ds;
239 	struct netdev_hw_addr *ha;
240 
241 	netif_addr_lock_bh(dev);
242 
243 	netdev_for_each_synced_uc_addr(ha, dev)
244 		dsa_slave_unsync_uc(dev, ha->addr);
245 
246 	netdev_for_each_synced_mc_addr(ha, dev)
247 		dsa_slave_unsync_mc(dev, ha->addr);
248 
249 	netif_addr_unlock_bh(dev);
250 
251 	if (dsa_switch_supports_uc_filtering(ds) ||
252 	    dsa_switch_supports_mc_filtering(ds))
253 		dsa_flush_workqueue();
254 }
255 
256 /* slave mii_bus handling ***************************************************/
257 static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
258 {
259 	struct dsa_switch *ds = bus->priv;
260 
261 	if (ds->phys_mii_mask & (1 << addr))
262 		return ds->ops->phy_read(ds, addr, reg);
263 
264 	return 0xffff;
265 }
266 
267 static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
268 {
269 	struct dsa_switch *ds = bus->priv;
270 
271 	if (ds->phys_mii_mask & (1 << addr))
272 		return ds->ops->phy_write(ds, addr, reg, val);
273 
274 	return 0;
275 }
276 
277 void dsa_slave_mii_bus_init(struct dsa_switch *ds)
278 {
279 	ds->slave_mii_bus->priv = (void *)ds;
280 	ds->slave_mii_bus->name = "dsa slave smi";
281 	ds->slave_mii_bus->read = dsa_slave_phy_read;
282 	ds->slave_mii_bus->write = dsa_slave_phy_write;
283 	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
284 		 ds->dst->index, ds->index);
285 	ds->slave_mii_bus->parent = ds->dev;
286 	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
287 }
288 
289 
290 /* slave device handling ****************************************************/
291 static int dsa_slave_get_iflink(const struct net_device *dev)
292 {
293 	return dsa_slave_to_master(dev)->ifindex;
294 }
295 
/* ndo_open: the DSA master must be up first since all slave traffic flows
 * through it; then install our MAC address where needed and enable the
 * switch port. Unwinds in reverse order on failure.
 */
static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dev_open(master, NULL);
	if (err < 0) {
		netdev_err(dev, "failed to open master %s\n", master->name);
		goto out;
	}

	/* Switches doing standalone unicast filtering need our MAC in the
	 * host FDB so the port delivers frames for it to the CPU.
	 */
	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
		if (err)
			goto out;
	}

	/* If our MAC differs from the master's, the master must accept it
	 * as a secondary unicast address.
	 */
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto del_host_addr;
	}

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto del_unicast;

	return 0;

del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
del_host_addr:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
out:
	return err;
}
336 
337 static int dsa_slave_close(struct net_device *dev)
338 {
339 	struct net_device *master = dsa_slave_to_master(dev);
340 	struct dsa_port *dp = dsa_slave_to_port(dev);
341 	struct dsa_switch *ds = dp->ds;
342 
343 	dsa_port_disable_rt(dp);
344 
345 	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
346 		dev_uc_del(master, dev->dev_addr);
347 
348 	if (dsa_switch_supports_uc_filtering(ds))
349 		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
350 
351 	return 0;
352 }
353 
354 static void dsa_slave_manage_host_flood(struct net_device *dev)
355 {
356 	bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
357 	struct dsa_port *dp = dsa_slave_to_port(dev);
358 	bool uc = dev->flags & IFF_PROMISC;
359 
360 	dsa_port_set_host_flood(dp, uc, mc);
361 }
362 
363 static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
364 {
365 	struct net_device *master = dsa_slave_to_master(dev);
366 	struct dsa_port *dp = dsa_slave_to_port(dev);
367 	struct dsa_switch *ds = dp->ds;
368 
369 	if (change & IFF_ALLMULTI)
370 		dev_set_allmulti(master,
371 				 dev->flags & IFF_ALLMULTI ? 1 : -1);
372 	if (change & IFF_PROMISC)
373 		dev_set_promiscuity(master,
374 				    dev->flags & IFF_PROMISC ? 1 : -1);
375 
376 	if (dsa_switch_supports_uc_filtering(ds) &&
377 	    dsa_switch_supports_mc_filtering(ds))
378 		dsa_slave_manage_host_flood(dev);
379 }
380 
/* ndo_set_rx_mode: propagate the slave's secondary MC and UC address lists
 * to the DSA master (and, via the sync/unsync callbacks, to the switch's
 * standalone MDB/FDB when supported).
 */
static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	__dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
	__dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
}
386 
/* ndo_set_mac_address: install the new address (host FDB entry and master
 * UC list entry as applicable) before removing the old one, so the port
 * never transiently loses its address while up.
 */
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* If the port is down, the address isn't synced yet to hardware or
	 * to the DSA master, so there is nothing to change.
	 */
	if (!(dev->flags & IFF_UP))
		goto out_change_dev_addr;

	/* Add the new address first ... */
	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
		if (err)
			return err;
	}

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			goto del_unicast;
	}

	/* ... then remove the old one (only present where it was added) */
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

out_change_dev_addr:
	eth_hw_addr_set(dev, addr->sa_data);

	return 0;

del_unicast:
	/* Roll back the host FDB entry added for the new address */
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);

	return err;
}
433 
/* State threaded through dsa_slave_port_fdb_do_dump() while walking a
 * port's FDB on behalf of an ndo_fdb_dump call.
 */
struct dsa_slave_dump_ctx {
	struct net_device *dev;		/* slave whose FDB is being dumped */
	struct sk_buff *skb;		/* netlink dump buffer */
	struct netlink_callback *cb;	/* dump state (portid, seq, resume index) */
	int idx;			/* entries visited so far, for resume */
};
440 
/* dsa_port_fdb_dump() callback: emit one RTM_NEWNEIGH message per FDB entry
 * into the netlink dump skb. Returns -EMSGSIZE when the skb is full so the
 * dump can be resumed later from cb->args[2].
 */
static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	/* Skip entries already delivered in a previous dump pass */
	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	/* VID 0 is implicit and not reported */
	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	/* Discard the partially-built message before reporting no space */
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}
484 
485 static int
486 dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
487 		   struct net_device *dev, struct net_device *filter_dev,
488 		   int *idx)
489 {
490 	struct dsa_port *dp = dsa_slave_to_port(dev);
491 	struct dsa_slave_dump_ctx dump = {
492 		.dev = dev,
493 		.skb = skb,
494 		.cb = cb,
495 		.idx = *idx,
496 	};
497 	int err;
498 
499 	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
500 	*idx = dump.idx;
501 
502 	return err;
503 }
504 
505 static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
506 {
507 	struct dsa_slave_priv *p = netdev_priv(dev);
508 	struct dsa_switch *ds = p->dp->ds;
509 	int port = p->dp->index;
510 
511 	/* Pass through to switch driver if it supports timestamping */
512 	switch (cmd) {
513 	case SIOCGHWTSTAMP:
514 		if (ds->ops->port_hwtstamp_get)
515 			return ds->ops->port_hwtstamp_get(ds, port, ifr);
516 		break;
517 	case SIOCSHWTSTAMP:
518 		if (ds->ops->port_hwtstamp_set)
519 			return ds->ops->port_hwtstamp_set(ds, port, ifr);
520 		break;
521 	}
522 
523 	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
524 }
525 
/* switchdev attribute-set entry point. Each attribute is only honored if
 * its orig_dev is a bridge port / bridge device that this DSA port actually
 * offloads; otherwise -EOPNOTSUPP is returned. A foreign ctx (not this
 * port) is silently accepted as a no-op.
 */
static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
				   const struct switchdev_attr *attr,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	if (ctx && ctx != dp)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_state(dp, attr->u.stp_state, true);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MST_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MST:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_mst_enable(dp, attr->u.mst, extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_VLAN_MSTI:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
594 
595 /* Must be called under rcu_read_lock() */
596 static int
597 dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
598 				      const struct switchdev_obj_port_vlan *vlan)
599 {
600 	struct net_device *upper_dev;
601 	struct list_head *iter;
602 
603 	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
604 		u16 vid;
605 
606 		if (!is_vlan_dev(upper_dev))
607 			continue;
608 
609 		vid = vlan_dev_vlan_id(upper_dev);
610 		if (vid == vlan->vid)
611 			return -EBUSY;
612 	}
613 
614 	return 0;
615 }
616 
/* Install a bridge VLAN on this port, refusing VIDs that clash with an
 * existing 802.1Q upper when the bridge is VLAN-aware.
 */
static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
	 * the same VID.
	 */
	if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
		int err;

		rcu_read_lock();
		err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
		rcu_read_unlock();
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Port already has a VLAN upper with this VID");
			return err;
		}
	}

	return dsa_port_vlan_add(dp, vlan, extack);
}
648 
649 /* Offload a VLAN installed on the bridge or on a foreign interface by
650  * installing it as a VLAN towards the CPU port.
651  */
652 static int dsa_slave_host_vlan_add(struct net_device *dev,
653 				   const struct switchdev_obj *obj,
654 				   struct netlink_ext_ack *extack)
655 {
656 	struct dsa_port *dp = dsa_slave_to_port(dev);
657 	struct switchdev_obj_port_vlan vlan;
658 
659 	/* Do nothing if this is a software bridge */
660 	if (!dp->bridge)
661 		return -EOPNOTSUPP;
662 
663 	if (dsa_port_skip_vlan_configuration(dp)) {
664 		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
665 		return 0;
666 	}
667 
668 	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
669 
670 	/* Even though drivers often handle CPU membership in special ways,
671 	 * it doesn't make sense to program a PVID, so clear this flag.
672 	 */
673 	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;
674 
675 	return dsa_port_host_vlan_add(dp, &vlan, extack);
676 }
677 
/* switchdev object-add entry point. Objects are honored only when orig_dev
 * is a bridge port / bridge device this DSA port offloads; VLANs raised on
 * a non-offloaded orig_dev are treated as host (CPU port) VLANs. A foreign
 * ctx (not this port) is silently accepted as a no-op.
 */
static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_add(dev, obj, extack);
		else
			err = dsa_slave_host_vlan_add(dev, obj, extack);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
727 
/* Remove a bridge VLAN from this port, unless VLAN configuration is being
 * skipped for it.
 */
static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	return dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
}
741 
742 static int dsa_slave_host_vlan_del(struct net_device *dev,
743 				   const struct switchdev_obj *obj)
744 {
745 	struct dsa_port *dp = dsa_slave_to_port(dev);
746 	struct switchdev_obj_port_vlan *vlan;
747 
748 	/* Do nothing if this is a software bridge */
749 	if (!dp->bridge)
750 		return -EOPNOTSUPP;
751 
752 	if (dsa_port_skip_vlan_configuration(dp))
753 		return 0;
754 
755 	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
756 
757 	return dsa_port_host_vlan_del(dp, vlan);
758 }
759 
/* switchdev object-delete entry point; mirrors dsa_slave_port_obj_add().
 * A foreign ctx (not this port) is silently accepted as a no-op.
 */
static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		/* Non-offloaded orig_dev means the VLAN lives on the CPU port */
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_del(dev, obj);
		else
			err = dsa_slave_host_vlan_del(dev, obj);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
808 
/* Transmit through netpoll. Only reachable when dsa_slave_xmit() saw
 * netpoll_tx_running(); without CONFIG_NET_POLL_CONTROLLER that can never
 * happen, hence the BUG().
 */
static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}
821 
822 static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
823 				 struct sk_buff *skb)
824 {
825 	struct dsa_switch *ds = p->dp->ds;
826 
827 	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
828 		return;
829 
830 	if (!ds->ops->port_txtstamp)
831 		return;
832 
833 	ds->ops->port_txtstamp(ds, p->dp->index, skb);
834 }
835 
/* Hand an already-tagged skb to the DSA master for transmission. Exported
 * for use by tagging protocol drivers.
 */
netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKB for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
853 
854 static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
855 {
856 	int needed_headroom = dev->needed_headroom;
857 	int needed_tailroom = dev->needed_tailroom;
858 
859 	/* For tail taggers, we need to pad short frames ourselves, to ensure
860 	 * that the tail tag does not fail at its role of being at the end of
861 	 * the packet, once the master interface pads the frame. Account for
862 	 * that pad length here, and pad later.
863 	 */
864 	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
865 		needed_tailroom += ETH_ZLEN - skb->len;
866 	/* skb_headroom() returns unsigned int... */
867 	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
868 	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
869 
870 	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
871 		/* No reallocation needed, yay! */
872 		return 0;
873 
874 	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
875 				GFP_ATOMIC);
876 }
877 
/* ndo_start_xmit: account the packet, timestamp it if requested, make room
 * for (and apply) the tagging protocol, then hand the skb to the master.
 */
static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	/* Zero the control block so the tagger starts from a clean slate */
	memset(skb->cb, 0, sizeof(skb->cb));

	/* Handle tx timestamp if any */
	dsa_skb_tx_timestamp(p, skb);

	/* On allocation failure the packet is silently dropped (counted by
	 * neither us nor the master).
	 */
	if (dsa_realloc_skb(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * dsa_realloc_skb(), which has also ensured that padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}
912 
913 /* ethtool operations *******************************************************/
914 
915 static void dsa_slave_get_drvinfo(struct net_device *dev,
916 				  struct ethtool_drvinfo *drvinfo)
917 {
918 	strscpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
919 	strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
920 	strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
921 }
922 
923 static int dsa_slave_get_regs_len(struct net_device *dev)
924 {
925 	struct dsa_port *dp = dsa_slave_to_port(dev);
926 	struct dsa_switch *ds = dp->ds;
927 
928 	if (ds->ops->get_regs_len)
929 		return ds->ops->get_regs_len(ds, dp->index);
930 
931 	return -EOPNOTSUPP;
932 }
933 
934 static void
935 dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
936 {
937 	struct dsa_port *dp = dsa_slave_to_port(dev);
938 	struct dsa_switch *ds = dp->ds;
939 
940 	if (ds->ops->get_regs)
941 		ds->ops->get_regs(ds, dp->index, regs, _p);
942 }
943 
944 static int dsa_slave_nway_reset(struct net_device *dev)
945 {
946 	struct dsa_port *dp = dsa_slave_to_port(dev);
947 
948 	return phylink_ethtool_nway_reset(dp->pl);
949 }
950 
951 static int dsa_slave_get_eeprom_len(struct net_device *dev)
952 {
953 	struct dsa_port *dp = dsa_slave_to_port(dev);
954 	struct dsa_switch *ds = dp->ds;
955 
956 	if (ds->cd && ds->cd->eeprom_len)
957 		return ds->cd->eeprom_len;
958 
959 	if (ds->ops->get_eeprom_len)
960 		return ds->ops->get_eeprom_len(ds);
961 
962 	return 0;
963 }
964 
965 static int dsa_slave_get_eeprom(struct net_device *dev,
966 				struct ethtool_eeprom *eeprom, u8 *data)
967 {
968 	struct dsa_port *dp = dsa_slave_to_port(dev);
969 	struct dsa_switch *ds = dp->ds;
970 
971 	if (ds->ops->get_eeprom)
972 		return ds->ops->get_eeprom(ds, eeprom, data);
973 
974 	return -EOPNOTSUPP;
975 }
976 
977 static int dsa_slave_set_eeprom(struct net_device *dev,
978 				struct ethtool_eeprom *eeprom, u8 *data)
979 {
980 	struct dsa_port *dp = dsa_slave_to_port(dev);
981 	struct dsa_switch *ds = dp->ds;
982 
983 	if (ds->ops->set_eeprom)
984 		return ds->ops->set_eeprom(ds, eeprom, data);
985 
986 	return -EOPNOTSUPP;
987 }
988 
/* ethtool .get_strings: the first four ETH_SS_STATS slots are the software
 * counters accumulated in dsa_slave_get_ethtool_stats(); driver strings
 * follow. strncpy is intentional here: it zero-pads each fixed
 * ETH_GSTRING_LEN slot.
 */
static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	} else if (stringset ==  ETH_SS_TEST) {
		net_selftest_get_strings(data);
	}

}
1010 
/* ethtool .get_ethtool_stats: sum the per-CPU software tx/rx counters into
 * data[0..3], then append the driver's hardware stats at data + 4 (matching
 * the layout advertised by dsa_slave_get_strings()).
 */
static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(dev->tstats, i);
		/* u64_stats retry loop: re-read if a writer updated the
		 * counters while we were reading them.
		 */
		do {
			start = u64_stats_fetch_begin(&s->syncp);
			tx_packets = u64_stats_read(&s->tx_packets);
			tx_bytes = u64_stats_read(&s->tx_bytes);
			rx_packets = u64_stats_read(&s->rx_packets);
			rx_bytes = u64_stats_read(&s->rx_bytes);
		} while (u64_stats_fetch_retry(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}
1040 
1041 static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
1042 {
1043 	struct dsa_port *dp = dsa_slave_to_port(dev);
1044 	struct dsa_switch *ds = dp->ds;
1045 
1046 	if (sset == ETH_SS_STATS) {
1047 		int count = 0;
1048 
1049 		if (ds->ops->get_sset_count) {
1050 			count = ds->ops->get_sset_count(ds, dp->index, sset);
1051 			if (count < 0)
1052 				return count;
1053 		}
1054 
1055 		return count + 4;
1056 	} else if (sset ==  ETH_SS_TEST) {
1057 		return net_selftest_get_count();
1058 	}
1059 
1060 	return -EOPNOTSUPP;
1061 }
1062 
1063 static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
1064 					struct ethtool_eth_phy_stats *phy_stats)
1065 {
1066 	struct dsa_port *dp = dsa_slave_to_port(dev);
1067 	struct dsa_switch *ds = dp->ds;
1068 
1069 	if (ds->ops->get_eth_phy_stats)
1070 		ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
1071 }
1072 
1073 static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
1074 					struct ethtool_eth_mac_stats *mac_stats)
1075 {
1076 	struct dsa_port *dp = dsa_slave_to_port(dev);
1077 	struct dsa_switch *ds = dp->ds;
1078 
1079 	if (ds->ops->get_eth_mac_stats)
1080 		ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
1081 }
1082 
1083 static void
1084 dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
1085 			     struct ethtool_eth_ctrl_stats *ctrl_stats)
1086 {
1087 	struct dsa_port *dp = dsa_slave_to_port(dev);
1088 	struct dsa_switch *ds = dp->ds;
1089 
1090 	if (ds->ops->get_eth_ctrl_stats)
1091 		ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
1092 }
1093 
1094 static void
1095 dsa_slave_get_rmon_stats(struct net_device *dev,
1096 			 struct ethtool_rmon_stats *rmon_stats,
1097 			 const struct ethtool_rmon_hist_range **ranges)
1098 {
1099 	struct dsa_port *dp = dsa_slave_to_port(dev);
1100 	struct dsa_switch *ds = dp->ds;
1101 
1102 	if (ds->ops->get_rmon_stats)
1103 		ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
1104 }
1105 
1106 static void dsa_slave_net_selftest(struct net_device *ndev,
1107 				   struct ethtool_test *etest, u64 *buf)
1108 {
1109 	struct dsa_port *dp = dsa_slave_to_port(ndev);
1110 	struct dsa_switch *ds = dp->ds;
1111 
1112 	if (ds->ops->self_test) {
1113 		ds->ops->self_test(ds, dp->index, etest, buf);
1114 		return;
1115 	}
1116 
1117 	net_selftest(ndev, etest, buf);
1118 }
1119 
1120 static int dsa_slave_get_mm(struct net_device *dev,
1121 			    struct ethtool_mm_state *state)
1122 {
1123 	struct dsa_port *dp = dsa_slave_to_port(dev);
1124 	struct dsa_switch *ds = dp->ds;
1125 
1126 	if (!ds->ops->get_mm)
1127 		return -EOPNOTSUPP;
1128 
1129 	return ds->ops->get_mm(ds, dp->index, state);
1130 }
1131 
1132 static int dsa_slave_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg,
1133 			    struct netlink_ext_ack *extack)
1134 {
1135 	struct dsa_port *dp = dsa_slave_to_port(dev);
1136 	struct dsa_switch *ds = dp->ds;
1137 
1138 	if (!ds->ops->set_mm)
1139 		return -EOPNOTSUPP;
1140 
1141 	return ds->ops->set_mm(ds, dp->index, cfg, extack);
1142 }
1143 
1144 static void dsa_slave_get_mm_stats(struct net_device *dev,
1145 				   struct ethtool_mm_stats *stats)
1146 {
1147 	struct dsa_port *dp = dsa_slave_to_port(dev);
1148 	struct dsa_switch *ds = dp->ds;
1149 
1150 	if (ds->ops->get_mm_stats)
1151 		ds->ops->get_mm_stats(ds, dp->index, stats);
1152 }
1153 
1154 static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
1155 {
1156 	struct dsa_port *dp = dsa_slave_to_port(dev);
1157 	struct dsa_switch *ds = dp->ds;
1158 
1159 	phylink_ethtool_get_wol(dp->pl, w);
1160 
1161 	if (ds->ops->get_wol)
1162 		ds->ops->get_wol(ds, dp->index, w);
1163 }
1164 
1165 static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
1166 {
1167 	struct dsa_port *dp = dsa_slave_to_port(dev);
1168 	struct dsa_switch *ds = dp->ds;
1169 	int ret = -EOPNOTSUPP;
1170 
1171 	phylink_ethtool_set_wol(dp->pl, w);
1172 
1173 	if (ds->ops->set_wol)
1174 		ret = ds->ops->set_wol(ds, dp->index, w);
1175 
1176 	return ret;
1177 }
1178 
1179 static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
1180 {
1181 	struct dsa_port *dp = dsa_slave_to_port(dev);
1182 	struct dsa_switch *ds = dp->ds;
1183 	int ret;
1184 
1185 	/* Port's PHY and MAC both need to be EEE capable */
1186 	if (!dev->phydev || !dp->pl)
1187 		return -ENODEV;
1188 
1189 	if (!ds->ops->set_mac_eee)
1190 		return -EOPNOTSUPP;
1191 
1192 	ret = ds->ops->set_mac_eee(ds, dp->index, e);
1193 	if (ret)
1194 		return ret;
1195 
1196 	return phylink_ethtool_set_eee(dp->pl, e);
1197 }
1198 
1199 static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
1200 {
1201 	struct dsa_port *dp = dsa_slave_to_port(dev);
1202 	struct dsa_switch *ds = dp->ds;
1203 	int ret;
1204 
1205 	/* Port's PHY and MAC both need to be EEE capable */
1206 	if (!dev->phydev || !dp->pl)
1207 		return -ENODEV;
1208 
1209 	if (!ds->ops->get_mac_eee)
1210 		return -EOPNOTSUPP;
1211 
1212 	ret = ds->ops->get_mac_eee(ds, dp->index, e);
1213 	if (ret)
1214 		return ret;
1215 
1216 	return phylink_ethtool_get_eee(dp->pl, e);
1217 }
1218 
1219 static int dsa_slave_get_link_ksettings(struct net_device *dev,
1220 					struct ethtool_link_ksettings *cmd)
1221 {
1222 	struct dsa_port *dp = dsa_slave_to_port(dev);
1223 
1224 	return phylink_ethtool_ksettings_get(dp->pl, cmd);
1225 }
1226 
1227 static int dsa_slave_set_link_ksettings(struct net_device *dev,
1228 					const struct ethtool_link_ksettings *cmd)
1229 {
1230 	struct dsa_port *dp = dsa_slave_to_port(dev);
1231 
1232 	return phylink_ethtool_ksettings_set(dp->pl, cmd);
1233 }
1234 
1235 static void dsa_slave_get_pause_stats(struct net_device *dev,
1236 				  struct ethtool_pause_stats *pause_stats)
1237 {
1238 	struct dsa_port *dp = dsa_slave_to_port(dev);
1239 	struct dsa_switch *ds = dp->ds;
1240 
1241 	if (ds->ops->get_pause_stats)
1242 		ds->ops->get_pause_stats(ds, dp->index, pause_stats);
1243 }
1244 
1245 static void dsa_slave_get_pauseparam(struct net_device *dev,
1246 				     struct ethtool_pauseparam *pause)
1247 {
1248 	struct dsa_port *dp = dsa_slave_to_port(dev);
1249 
1250 	phylink_ethtool_get_pauseparam(dp->pl, pause);
1251 }
1252 
1253 static int dsa_slave_set_pauseparam(struct net_device *dev,
1254 				    struct ethtool_pauseparam *pause)
1255 {
1256 	struct dsa_port *dp = dsa_slave_to_port(dev);
1257 
1258 	return phylink_ethtool_set_pauseparam(dp->pl, pause);
1259 }
1260 
1261 #ifdef CONFIG_NET_POLL_CONTROLLER
1262 static int dsa_slave_netpoll_setup(struct net_device *dev,
1263 				   struct netpoll_info *ni)
1264 {
1265 	struct net_device *master = dsa_slave_to_master(dev);
1266 	struct dsa_slave_priv *p = netdev_priv(dev);
1267 	struct netpoll *netpoll;
1268 	int err = 0;
1269 
1270 	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
1271 	if (!netpoll)
1272 		return -ENOMEM;
1273 
1274 	err = __netpoll_setup(netpoll, master);
1275 	if (err) {
1276 		kfree(netpoll);
1277 		goto out;
1278 	}
1279 
1280 	p->netpoll = netpoll;
1281 out:
1282 	return err;
1283 }
1284 
1285 static void dsa_slave_netpoll_cleanup(struct net_device *dev)
1286 {
1287 	struct dsa_slave_priv *p = netdev_priv(dev);
1288 	struct netpoll *netpoll = p->netpoll;
1289 
1290 	if (!netpoll)
1291 		return;
1292 
1293 	p->netpoll = NULL;
1294 
1295 	__netpoll_free(netpoll);
1296 }
1297 
/* ndo_poll_controller - intentionally empty: the netpoll instance is set up
 * on the DSA master (see dsa_slave_netpoll_setup()), so there is nothing to
 * poll on the slave device itself.
 */
static void dsa_slave_poll_controller(struct net_device *dev)
{
}
1301 #endif
1302 
1303 static struct dsa_mall_tc_entry *
1304 dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
1305 {
1306 	struct dsa_slave_priv *p = netdev_priv(dev);
1307 	struct dsa_mall_tc_entry *mall_tc_entry;
1308 
1309 	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
1310 		if (mall_tc_entry->cookie == cookie)
1311 			return mall_tc_entry;
1312 
1313 	return NULL;
1314 }
1315 
1316 static int
1317 dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
1318 				  struct tc_cls_matchall_offload *cls,
1319 				  bool ingress)
1320 {
1321 	struct netlink_ext_ack *extack = cls->common.extack;
1322 	struct dsa_port *dp = dsa_slave_to_port(dev);
1323 	struct dsa_slave_priv *p = netdev_priv(dev);
1324 	struct dsa_mall_mirror_tc_entry *mirror;
1325 	struct dsa_mall_tc_entry *mall_tc_entry;
1326 	struct dsa_switch *ds = dp->ds;
1327 	struct flow_action_entry *act;
1328 	struct dsa_port *to_dp;
1329 	int err;
1330 
1331 	if (!ds->ops->port_mirror_add)
1332 		return -EOPNOTSUPP;
1333 
1334 	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
1335 					      cls->common.extack))
1336 		return -EOPNOTSUPP;
1337 
1338 	act = &cls->rule->action.entries[0];
1339 
1340 	if (!act->dev)
1341 		return -EINVAL;
1342 
1343 	if (!dsa_slave_dev_check(act->dev))
1344 		return -EOPNOTSUPP;
1345 
1346 	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1347 	if (!mall_tc_entry)
1348 		return -ENOMEM;
1349 
1350 	mall_tc_entry->cookie = cls->cookie;
1351 	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
1352 	mirror = &mall_tc_entry->mirror;
1353 
1354 	to_dp = dsa_slave_to_port(act->dev);
1355 
1356 	mirror->to_local_port = to_dp->index;
1357 	mirror->ingress = ingress;
1358 
1359 	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack);
1360 	if (err) {
1361 		kfree(mall_tc_entry);
1362 		return err;
1363 	}
1364 
1365 	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
1366 
1367 	return err;
1368 }
1369 
1370 static int
1371 dsa_slave_add_cls_matchall_police(struct net_device *dev,
1372 				  struct tc_cls_matchall_offload *cls,
1373 				  bool ingress)
1374 {
1375 	struct netlink_ext_ack *extack = cls->common.extack;
1376 	struct dsa_port *dp = dsa_slave_to_port(dev);
1377 	struct dsa_slave_priv *p = netdev_priv(dev);
1378 	struct dsa_mall_policer_tc_entry *policer;
1379 	struct dsa_mall_tc_entry *mall_tc_entry;
1380 	struct dsa_switch *ds = dp->ds;
1381 	struct flow_action_entry *act;
1382 	int err;
1383 
1384 	if (!ds->ops->port_policer_add) {
1385 		NL_SET_ERR_MSG_MOD(extack,
1386 				   "Policing offload not implemented");
1387 		return -EOPNOTSUPP;
1388 	}
1389 
1390 	if (!ingress) {
1391 		NL_SET_ERR_MSG_MOD(extack,
1392 				   "Only supported on ingress qdisc");
1393 		return -EOPNOTSUPP;
1394 	}
1395 
1396 	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
1397 					      cls->common.extack))
1398 		return -EOPNOTSUPP;
1399 
1400 	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
1401 		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
1402 			NL_SET_ERR_MSG_MOD(extack,
1403 					   "Only one port policer allowed");
1404 			return -EEXIST;
1405 		}
1406 	}
1407 
1408 	act = &cls->rule->action.entries[0];
1409 
1410 	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1411 	if (!mall_tc_entry)
1412 		return -ENOMEM;
1413 
1414 	mall_tc_entry->cookie = cls->cookie;
1415 	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
1416 	policer = &mall_tc_entry->policer;
1417 	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
1418 	policer->burst = act->police.burst;
1419 
1420 	err = ds->ops->port_policer_add(ds, dp->index, policer);
1421 	if (err) {
1422 		kfree(mall_tc_entry);
1423 		return err;
1424 	}
1425 
1426 	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
1427 
1428 	return err;
1429 }
1430 
1431 static int dsa_slave_add_cls_matchall(struct net_device *dev,
1432 				      struct tc_cls_matchall_offload *cls,
1433 				      bool ingress)
1434 {
1435 	int err = -EOPNOTSUPP;
1436 
1437 	if (cls->common.protocol == htons(ETH_P_ALL) &&
1438 	    flow_offload_has_one_action(&cls->rule->action) &&
1439 	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
1440 		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
1441 	else if (flow_offload_has_one_action(&cls->rule->action) &&
1442 		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
1443 		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);
1444 
1445 	return err;
1446 }
1447 
1448 static void dsa_slave_del_cls_matchall(struct net_device *dev,
1449 				       struct tc_cls_matchall_offload *cls)
1450 {
1451 	struct dsa_port *dp = dsa_slave_to_port(dev);
1452 	struct dsa_mall_tc_entry *mall_tc_entry;
1453 	struct dsa_switch *ds = dp->ds;
1454 
1455 	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
1456 	if (!mall_tc_entry)
1457 		return;
1458 
1459 	list_del(&mall_tc_entry->list);
1460 
1461 	switch (mall_tc_entry->type) {
1462 	case DSA_PORT_MALL_MIRROR:
1463 		if (ds->ops->port_mirror_del)
1464 			ds->ops->port_mirror_del(ds, dp->index,
1465 						 &mall_tc_entry->mirror);
1466 		break;
1467 	case DSA_PORT_MALL_POLICER:
1468 		if (ds->ops->port_policer_del)
1469 			ds->ops->port_policer_del(ds, dp->index);
1470 		break;
1471 	default:
1472 		WARN_ON(1);
1473 	}
1474 
1475 	kfree(mall_tc_entry);
1476 }
1477 
1478 static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
1479 					   struct tc_cls_matchall_offload *cls,
1480 					   bool ingress)
1481 {
1482 	if (cls->common.chain_index)
1483 		return -EOPNOTSUPP;
1484 
1485 	switch (cls->command) {
1486 	case TC_CLSMATCHALL_REPLACE:
1487 		return dsa_slave_add_cls_matchall(dev, cls, ingress);
1488 	case TC_CLSMATCHALL_DESTROY:
1489 		dsa_slave_del_cls_matchall(dev, cls);
1490 		return 0;
1491 	default:
1492 		return -EOPNOTSUPP;
1493 	}
1494 }
1495 
1496 static int dsa_slave_add_cls_flower(struct net_device *dev,
1497 				    struct flow_cls_offload *cls,
1498 				    bool ingress)
1499 {
1500 	struct dsa_port *dp = dsa_slave_to_port(dev);
1501 	struct dsa_switch *ds = dp->ds;
1502 	int port = dp->index;
1503 
1504 	if (!ds->ops->cls_flower_add)
1505 		return -EOPNOTSUPP;
1506 
1507 	return ds->ops->cls_flower_add(ds, port, cls, ingress);
1508 }
1509 
1510 static int dsa_slave_del_cls_flower(struct net_device *dev,
1511 				    struct flow_cls_offload *cls,
1512 				    bool ingress)
1513 {
1514 	struct dsa_port *dp = dsa_slave_to_port(dev);
1515 	struct dsa_switch *ds = dp->ds;
1516 	int port = dp->index;
1517 
1518 	if (!ds->ops->cls_flower_del)
1519 		return -EOPNOTSUPP;
1520 
1521 	return ds->ops->cls_flower_del(ds, port, cls, ingress);
1522 }
1523 
1524 static int dsa_slave_stats_cls_flower(struct net_device *dev,
1525 				      struct flow_cls_offload *cls,
1526 				      bool ingress)
1527 {
1528 	struct dsa_port *dp = dsa_slave_to_port(dev);
1529 	struct dsa_switch *ds = dp->ds;
1530 	int port = dp->index;
1531 
1532 	if (!ds->ops->cls_flower_stats)
1533 		return -EOPNOTSUPP;
1534 
1535 	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
1536 }
1537 
1538 static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
1539 					 struct flow_cls_offload *cls,
1540 					 bool ingress)
1541 {
1542 	switch (cls->command) {
1543 	case FLOW_CLS_REPLACE:
1544 		return dsa_slave_add_cls_flower(dev, cls, ingress);
1545 	case FLOW_CLS_DESTROY:
1546 		return dsa_slave_del_cls_flower(dev, cls, ingress);
1547 	case FLOW_CLS_STATS:
1548 		return dsa_slave_stats_cls_flower(dev, cls, ingress);
1549 	default:
1550 		return -EOPNOTSUPP;
1551 	}
1552 }
1553 
1554 static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1555 				       void *cb_priv, bool ingress)
1556 {
1557 	struct net_device *dev = cb_priv;
1558 
1559 	if (!tc_can_offload(dev))
1560 		return -EOPNOTSUPP;
1561 
1562 	switch (type) {
1563 	case TC_SETUP_CLSMATCHALL:
1564 		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
1565 	case TC_SETUP_CLSFLOWER:
1566 		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
1567 	default:
1568 		return -EOPNOTSUPP;
1569 	}
1570 }
1571 
/* Flow block callback bound on the clsact ingress qdisc */
static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}
1577 
/* Flow block callback bound on the clsact egress qdisc */
static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}
1583 
/* All DSA slave flow block callbacks register on this shared list so that
 * flow_block_cb_is_busy() can detect a double bind (see
 * dsa_slave_setup_tc_block()).
 */
static LIST_HEAD(dsa_slave_block_cb_list);
1585 
/* Handle TC_SETUP_BLOCK: bind or unbind a flow block on this slave device.
 * Only the clsact ingress/egress binder types are supported; each direction
 * gets its own callback flavor.
 */
static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	/* Select the callback matching the binder direction */
	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_slave_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		/* Refuse to bind the same cb/dev pair a second time */
		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
1625 
1626 static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
1627 				    void *type_data)
1628 {
1629 	struct net_device *master = dsa_port_to_master(dsa_to_port(ds, port));
1630 
1631 	if (!master->netdev_ops->ndo_setup_tc)
1632 		return -EOPNOTSUPP;
1633 
1634 	return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
1635 }
1636 
1637 static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
1638 			      void *type_data)
1639 {
1640 	struct dsa_port *dp = dsa_slave_to_port(dev);
1641 	struct dsa_switch *ds = dp->ds;
1642 
1643 	switch (type) {
1644 	case TC_SETUP_BLOCK:
1645 		return dsa_slave_setup_tc_block(dev, type_data);
1646 	case TC_SETUP_FT:
1647 		return dsa_slave_setup_ft_block(ds, dp->index, type_data);
1648 	default:
1649 		break;
1650 	}
1651 
1652 	if (!ds->ops->port_setup_tc)
1653 		return -EOPNOTSUPP;
1654 
1655 	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
1656 }
1657 
1658 static int dsa_slave_get_rxnfc(struct net_device *dev,
1659 			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
1660 {
1661 	struct dsa_port *dp = dsa_slave_to_port(dev);
1662 	struct dsa_switch *ds = dp->ds;
1663 
1664 	if (!ds->ops->get_rxnfc)
1665 		return -EOPNOTSUPP;
1666 
1667 	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
1668 }
1669 
1670 static int dsa_slave_set_rxnfc(struct net_device *dev,
1671 			       struct ethtool_rxnfc *nfc)
1672 {
1673 	struct dsa_port *dp = dsa_slave_to_port(dev);
1674 	struct dsa_switch *ds = dp->ds;
1675 
1676 	if (!ds->ops->set_rxnfc)
1677 		return -EOPNOTSUPP;
1678 
1679 	return ds->ops->set_rxnfc(ds, dp->index, nfc);
1680 }
1681 
1682 static int dsa_slave_get_ts_info(struct net_device *dev,
1683 				 struct ethtool_ts_info *ts)
1684 {
1685 	struct dsa_slave_priv *p = netdev_priv(dev);
1686 	struct dsa_switch *ds = p->dp->ds;
1687 
1688 	if (!ds->ops->get_ts_info)
1689 		return -EOPNOTSUPP;
1690 
1691 	return ds->ops->get_ts_info(ds, p->dp->index, ts);
1692 }
1693 
1694 static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
1695 				     u16 vid)
1696 {
1697 	struct dsa_port *dp = dsa_slave_to_port(dev);
1698 	struct switchdev_obj_port_vlan vlan = {
1699 		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
1700 		.vid = vid,
1701 		/* This API only allows programming tagged, non-PVID VIDs */
1702 		.flags = 0,
1703 	};
1704 	struct netlink_ext_ack extack = {0};
1705 	int ret;
1706 
1707 	/* User port... */
1708 	ret = dsa_port_vlan_add(dp, &vlan, &extack);
1709 	if (ret) {
1710 		if (extack._msg)
1711 			netdev_err(dev, "%s\n", extack._msg);
1712 		return ret;
1713 	}
1714 
1715 	/* And CPU port... */
1716 	ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
1717 	if (ret) {
1718 		if (extack._msg)
1719 			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
1720 				   extack._msg);
1721 		return ret;
1722 	}
1723 
1724 	return 0;
1725 }
1726 
1727 static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
1728 				      u16 vid)
1729 {
1730 	struct dsa_port *dp = dsa_slave_to_port(dev);
1731 	struct switchdev_obj_port_vlan vlan = {
1732 		.vid = vid,
1733 		/* This API only allows programming tagged, non-PVID VIDs */
1734 		.flags = 0,
1735 	};
1736 	int err;
1737 
1738 	err = dsa_port_vlan_del(dp, &vlan);
1739 	if (err)
1740 		return err;
1741 
1742 	return dsa_port_host_vlan_del(dp, &vlan);
1743 }
1744 
1745 static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
1746 {
1747 	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
1748 
1749 	return dsa_slave_vlan_rx_add_vid(arg, proto, vid);
1750 }
1751 
1752 static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
1753 {
1754 	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
1755 
1756 	return dsa_slave_vlan_rx_kill_vid(arg, proto, vid);
1757 }
1758 
1759 /* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
1760  * filtering is enabled. The baseline is that only ports that offload a
1761  * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware,
1762  * but there are exceptions for quirky hardware.
1763  *
1764  * If ds->vlan_filtering_is_global = true, then standalone ports which share
1765  * the same switch with other ports that offload a VLAN-aware bridge are also
1766  * inevitably VLAN-aware.
1767  *
1768  * To summarize, a DSA switch port offloads:
1769  *
1770  * - If standalone (this includes software bridge, software LAG):
1771  *     - if ds->needs_standalone_vlan_filtering = true, OR if
1772  *       (ds->vlan_filtering_is_global = true AND there are bridges spanning
1773  *       this switch chip which have vlan_filtering=1)
1774  *         - the 8021q upper VLANs
1775  *     - else (standalone VLAN filtering is not needed, VLAN filtering is not
1776  *       global, or it is, but no port is under a VLAN-aware bridge):
1777  *         - no VLAN (any 8021q upper is a software VLAN)
1778  *
1779  * - If under a vlan_filtering=0 bridge which it offload:
1780  *     - if ds->configure_vlan_while_not_filtering = true (default):
1781  *         - the bridge VLANs. These VLANs are committed to hardware but inactive.
1782  *     - else (deprecated):
1783  *         - no VLAN. The bridge VLANs are not restored when VLAN awareness is
1784  *           enabled, so this behavior is broken and discouraged.
1785  *
1786  * - If under a vlan_filtering=1 bridge which it offload:
1787  *     - the bridge VLANs
1788  *     - the 8021q upper VLANs
1789  */
1790 int dsa_slave_manage_vlan_filtering(struct net_device *slave,
1791 				    bool vlan_filtering)
1792 {
1793 	int err;
1794 
1795 	if (vlan_filtering) {
1796 		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1797 
1798 		err = vlan_for_each(slave, dsa_slave_restore_vlan, slave);
1799 		if (err) {
1800 			vlan_for_each(slave, dsa_slave_clear_vlan, slave);
1801 			slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1802 			return err;
1803 		}
1804 	} else {
1805 		err = vlan_for_each(slave, dsa_slave_clear_vlan, slave);
1806 		if (err)
1807 			return err;
1808 
1809 		slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1810 	}
1811 
1812 	return 0;
1813 }
1814 
/* Bookkeeping entry used by dsa_bridge_mtu_normalization(): records a slave
 * device and its MTU prior to a bulk change, so dsa_hw_port_list_set_mtu()
 * can roll back on failure.
 */
struct dsa_hw_port {
	struct list_head list;
	struct net_device *dev;
	int old_mtu;	/* MTU to restore on rollback */
};
1820 
/* Set @mtu on every device in @hw_port_list. On the first failure, restore
 * the recorded old_mtu on all entries already changed and return the error.
 */
static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
{
	const struct dsa_hw_port *p;
	int err;

	list_for_each_entry(p, hw_port_list, list) {
		if (p->dev->mtu == mtu)
			continue;

		err = dev_set_mtu(p->dev, mtu);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	/* Walk backwards over only the entries we already touched */
	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
		if (p->dev->mtu == p->old_mtu)
			continue;

		if (dev_set_mtu(p->dev, p->old_mtu))
			netdev_err(p->dev, "Failed to restore MTU\n");
	}

	return err;
}
1848 
1849 static void dsa_hw_port_list_free(struct list_head *hw_port_list)
1850 {
1851 	struct dsa_hw_port *p, *n;
1852 
1853 	list_for_each_entry_safe(p, n, hw_port_list, list)
1854 		kfree(p);
1855 }
1856 
1857 /* Make the hardware datapath to/from @dev limited to a common MTU */
static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
	struct list_head hw_port_list;
	struct dsa_switch_tree *dst;
	int min_mtu = ETH_MAX_MTU;	/* lowered to the smallest member MTU below */
	struct dsa_port *other_dp;
	int err;

	/* Only relevant for hardware that enforces MTU on ingress */
	if (!dp->ds->mtu_enforcement_ingress)
		return;

	/* Standalone ports have no bridge datapath to normalize */
	if (!dp->bridge)
		return;

	INIT_LIST_HEAD(&hw_port_list);

	/* Populate the list of ports that are part of the same bridge
	 * as the newly added/modified port
	 */
	list_for_each_entry(dst, &dsa_tree_list, list) {
		list_for_each_entry(other_dp, &dst->ports, list) {
			struct dsa_hw_port *hw_port;
			struct net_device *slave;

			if (other_dp->type != DSA_PORT_TYPE_USER)
				continue;

			if (!dsa_port_bridge_same(dp, other_dp))
				continue;

			if (!other_dp->ds->mtu_enforcement_ingress)
				continue;

			slave = other_dp->slave;

			if (min_mtu > slave->mtu)
				min_mtu = slave->mtu;

			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
			if (!hw_port)
				goto out;

			/* Remember the current MTU for rollback */
			hw_port->dev = slave;
			hw_port->old_mtu = slave->mtu;

			list_add(&hw_port->list, &hw_port_list);
		}
	}

	/* Attempt to configure the entire hardware bridge to the newly added
	 * interface's MTU first, regardless of whether the intention of the
	 * user was to raise or lower it.
	 */
	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
	if (!err)
		goto out;

	/* Clearly that didn't work out so well, so just set the minimum MTU on
	 * all hardware bridge ports now. If this fails too, then all ports will
	 * still have their old MTU rolled back anyway.
	 */
	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);

out:
	dsa_hw_port_list_free(&hw_port_list);
}
1924 
/* ndo_change_mtu - change the MTU of a DSA slave. Because all slaves share
 * the DSA master, the master's MTU must cover the largest slave MTU plus
 * tagging overhead; the CPU port MTU is propagated to upstream switches.
 * Errors unwind in reverse order of application.
 */
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int largest_mtu = 0;
	int new_master_mtu;
	int old_master_mtu;
	int mtu_limit;
	int overhead;
	int cpu_mtu;
	int err;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	/* Find the largest MTU across all user ports of the tree, with
	 * new_mtu substituted for this port's current value.
	 */
	dsa_tree_for_each_user_port(other_dp, ds->dst) {
		int slave_mtu;

		/* During probe, this function will be called for each slave
		 * device, while not all of them have been allocated. That's
		 * ok, it doesn't change what the maximum is, so ignore it.
		 */
		if (!other_dp->slave)
			continue;

		/* Pretend that we already applied the setting, which we
		 * actually haven't (still haven't done all integrity checks)
		 */
		if (dp == other_dp)
			slave_mtu = new_mtu;
		else
			slave_mtu = other_dp->slave->mtu;

		if (largest_mtu < slave_mtu)
			largest_mtu = slave_mtu;
	}

	/* The master must absorb the tagging protocol overhead on top of the
	 * largest slave MTU.
	 */
	overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu + overhead);
	old_master_mtu = master->mtu;
	new_master_mtu = largest_mtu + overhead;
	if (new_master_mtu > mtu_limit)
		return -ERANGE;

	/* If the master MTU isn't over limit, there's no need to check the CPU
	 * MTU, since that surely isn't either.
	 */
	cpu_mtu = largest_mtu;

	/* Start applying stuff */
	if (new_master_mtu != old_master_mtu) {
		err = dev_set_mtu(master, new_master_mtu);
		if (err < 0)
			goto out_master_failed;

		/* We only need to propagate the MTU of the CPU port to
		 * upstream switches, so emit a notifier which updates them.
		 */
		err = dsa_port_mtu_change(cpu_dp, cpu_mtu);
		if (err)
			goto out_cpu_failed;
	}

	err = ds->ops->port_change_mtu(ds, dp->index, new_mtu);
	if (err)
		goto out_port_failed;

	dev->mtu = new_mtu;

	dsa_bridge_mtu_normalization(dp);

	return 0;

	/* Unwind in reverse order of application */
out_port_failed:
	if (new_master_mtu != old_master_mtu)
		dsa_port_mtu_change(cpu_dp, old_master_mtu - overhead);
out_cpu_failed:
	if (new_master_mtu != old_master_mtu)
		dev_set_mtu(master, old_master_mtu);
out_master_failed:
	return err;
}
2010 
2011 static int __maybe_unused
2012 dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
2013 {
2014 	struct dsa_port *dp = dsa_slave_to_port(dev);
2015 	struct dsa_switch *ds = dp->ds;
2016 	unsigned long mask, new_prio;
2017 	int err, port = dp->index;
2018 
2019 	if (!ds->ops->port_set_default_prio)
2020 		return -EOPNOTSUPP;
2021 
2022 	err = dcb_ieee_setapp(dev, app);
2023 	if (err)
2024 		return err;
2025 
2026 	mask = dcb_ieee_getapp_mask(dev, app);
2027 	new_prio = __fls(mask);
2028 
2029 	err = ds->ops->port_set_default_prio(ds, port, new_prio);
2030 	if (err) {
2031 		dcb_ieee_delapp(dev, app);
2032 		return err;
2033 	}
2034 
2035 	return 0;
2036 }
2037 
2038 static int __maybe_unused
2039 dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
2040 {
2041 	struct dsa_port *dp = dsa_slave_to_port(dev);
2042 	struct dsa_switch *ds = dp->ds;
2043 	unsigned long mask, new_prio;
2044 	int err, port = dp->index;
2045 	u8 dscp = app->protocol;
2046 
2047 	if (!ds->ops->port_add_dscp_prio)
2048 		return -EOPNOTSUPP;
2049 
2050 	if (dscp >= 64) {
2051 		netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
2052 			   dscp);
2053 		return -EINVAL;
2054 	}
2055 
2056 	err = dcb_ieee_setapp(dev, app);
2057 	if (err)
2058 		return err;
2059 
2060 	mask = dcb_ieee_getapp_mask(dev, app);
2061 	new_prio = __fls(mask);
2062 
2063 	err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio);
2064 	if (err) {
2065 		dcb_ieee_delapp(dev, app);
2066 		return err;
2067 	}
2068 
2069 	return 0;
2070 }
2071 
2072 static int __maybe_unused dsa_slave_dcbnl_ieee_setapp(struct net_device *dev,
2073 						      struct dcb_app *app)
2074 {
2075 	switch (app->selector) {
2076 	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
2077 		switch (app->protocol) {
2078 		case 0:
2079 			return dsa_slave_dcbnl_set_default_prio(dev, app);
2080 		default:
2081 			return -EOPNOTSUPP;
2082 		}
2083 		break;
2084 	case IEEE_8021QAZ_APP_SEL_DSCP:
2085 		return dsa_slave_dcbnl_add_dscp_prio(dev, app);
2086 	default:
2087 		return -EOPNOTSUPP;
2088 	}
2089 }
2090 
2091 static int __maybe_unused
2092 dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
2093 {
2094 	struct dsa_port *dp = dsa_slave_to_port(dev);
2095 	struct dsa_switch *ds = dp->ds;
2096 	unsigned long mask, new_prio;
2097 	int err, port = dp->index;
2098 
2099 	if (!ds->ops->port_set_default_prio)
2100 		return -EOPNOTSUPP;
2101 
2102 	err = dcb_ieee_delapp(dev, app);
2103 	if (err)
2104 		return err;
2105 
2106 	mask = dcb_ieee_getapp_mask(dev, app);
2107 	new_prio = mask ? __fls(mask) : 0;
2108 
2109 	err = ds->ops->port_set_default_prio(ds, port, new_prio);
2110 	if (err) {
2111 		dcb_ieee_setapp(dev, app);
2112 		return err;
2113 	}
2114 
2115 	return 0;
2116 }
2117 
2118 static int __maybe_unused
2119 dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
2120 {
2121 	struct dsa_port *dp = dsa_slave_to_port(dev);
2122 	struct dsa_switch *ds = dp->ds;
2123 	int err, port = dp->index;
2124 	u8 dscp = app->protocol;
2125 
2126 	if (!ds->ops->port_del_dscp_prio)
2127 		return -EOPNOTSUPP;
2128 
2129 	err = dcb_ieee_delapp(dev, app);
2130 	if (err)
2131 		return err;
2132 
2133 	err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority);
2134 	if (err) {
2135 		dcb_ieee_setapp(dev, app);
2136 		return err;
2137 	}
2138 
2139 	return 0;
2140 }
2141 
2142 static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev,
2143 						      struct dcb_app *app)
2144 {
2145 	switch (app->selector) {
2146 	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
2147 		switch (app->protocol) {
2148 		case 0:
2149 			return dsa_slave_dcbnl_del_default_prio(dev, app);
2150 		default:
2151 			return -EOPNOTSUPP;
2152 		}
2153 		break;
2154 	case IEEE_8021QAZ_APP_SEL_DSCP:
2155 		return dsa_slave_dcbnl_del_dscp_prio(dev, app);
2156 	default:
2157 		return -EOPNOTSUPP;
2158 	}
2159 }
2160 
2161 /* Pre-populate the DCB application priority table with the priorities
2162  * configured during switch setup, which we read from hardware here.
2163  */
static int dsa_slave_dcbnl_init(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	/* Seed the default-priority APP entry from the hardware value */
	if (ds->ops->port_get_default_prio) {
		int prio = ds->ops->port_get_default_prio(ds, port);
		struct dcb_app app = {
			.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
			.protocol = 0,
			.priority = prio,
		};

		if (prio < 0)
			return prio;

		err = dcb_ieee_setapp(dev, &app);
		if (err)
			return err;
	}

	/* Seed one DSCP APP entry per DSCP value the driver reports.
	 * -EOPNOTSUPP from the driver means "no entry for this DSCP",
	 * not a hard error.
	 */
	if (ds->ops->port_get_dscp_prio) {
		int protocol;

		for (protocol = 0; protocol < 64; protocol++) {
			struct dcb_app app = {
				.selector = IEEE_8021QAZ_APP_SEL_DSCP,
				.protocol = protocol,
			};
			int prio;

			prio = ds->ops->port_get_dscp_prio(ds, port, protocol);
			if (prio == -EOPNOTSUPP)
				continue;
			if (prio < 0)
				return prio;

			app.priority = prio;

			err = dcb_ieee_setapp(dev, &app);
			if (err)
				return err;
		}
	}

	return 0;
}
2213 
/* ethtool operations for DSA user ports. Each handler forwards the request
 * to the underlying switch driver via the dsa_switch_ops of the port.
 */
static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo		= dsa_slave_get_drvinfo,
	.get_regs_len		= dsa_slave_get_regs_len,
	.get_regs		= dsa_slave_get_regs,
	.nway_reset		= dsa_slave_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= dsa_slave_get_eeprom_len,
	.get_eeprom		= dsa_slave_get_eeprom,
	.set_eeprom		= dsa_slave_set_eeprom,
	.get_strings		= dsa_slave_get_strings,
	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
	.get_sset_count		= dsa_slave_get_sset_count,
	.get_eth_phy_stats	= dsa_slave_get_eth_phy_stats,
	.get_eth_mac_stats	= dsa_slave_get_eth_mac_stats,
	.get_eth_ctrl_stats	= dsa_slave_get_eth_ctrl_stats,
	.get_rmon_stats		= dsa_slave_get_rmon_stats,
	.set_wol		= dsa_slave_set_wol,
	.get_wol		= dsa_slave_get_wol,
	.set_eee		= dsa_slave_set_eee,
	.get_eee		= dsa_slave_get_eee,
	.get_link_ksettings	= dsa_slave_get_link_ksettings,
	.set_link_ksettings	= dsa_slave_set_link_ksettings,
	.get_pause_stats	= dsa_slave_get_pause_stats,
	.get_pauseparam		= dsa_slave_get_pauseparam,
	.set_pauseparam		= dsa_slave_set_pauseparam,
	.get_rxnfc		= dsa_slave_get_rxnfc,
	.set_rxnfc		= dsa_slave_set_rxnfc,
	.get_ts_info		= dsa_slave_get_ts_info,
	.self_test		= dsa_slave_net_selftest,
	.get_mm			= dsa_slave_get_mm,
	.set_mm			= dsa_slave_set_mm,
	.get_mm_stats		= dsa_slave_get_mm_stats,
};
2247 
/* DCB netlink operations: only the IEEE 802.1Qaz APP table (default port
 * priority and DSCP-to-priority mappings) is offloaded.
 */
static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = {
	.ieee_setapp		= dsa_slave_dcbnl_ieee_setapp,
	.ieee_delapp		= dsa_slave_dcbnl_ieee_delapp,
};
2252 
2253 static void dsa_slave_get_stats64(struct net_device *dev,
2254 				  struct rtnl_link_stats64 *s)
2255 {
2256 	struct dsa_port *dp = dsa_slave_to_port(dev);
2257 	struct dsa_switch *ds = dp->ds;
2258 
2259 	if (ds->ops->get_stats64)
2260 		ds->ops->get_stats64(ds, dp->index, s);
2261 	else
2262 		dev_get_tstats64(dev, s);
2263 }
2264 
2265 static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
2266 				       struct net_device_path *path)
2267 {
2268 	struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
2269 	struct net_device *master = dsa_port_to_master(dp);
2270 	struct dsa_port *cpu_dp = dp->cpu_dp;
2271 
2272 	path->dev = ctx->dev;
2273 	path->type = DEV_PATH_DSA;
2274 	path->dsa.proto = cpu_dp->tag_ops->proto;
2275 	path->dsa.port = dp->index;
2276 	ctx->dev = master;
2277 
2278 	return 0;
2279 }
2280 
/* Netdevice operations for DSA user ports. Note that this ops vector also
 * serves as the identity test for user ports (see dsa_slave_dev_check()).
 */
static const struct net_device_ops dsa_slave_netdev_ops = {
	.ndo_open	 	= dsa_slave_open,
	.ndo_stop		= dsa_slave_close,
	.ndo_start_xmit		= dsa_slave_xmit,
	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
	.ndo_set_mac_address	= dsa_slave_set_mac_address,
	.ndo_fdb_dump		= dsa_slave_fdb_dump,
	.ndo_eth_ioctl		= dsa_slave_ioctl,
	.ndo_get_iflink		= dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
	.ndo_poll_controller	= dsa_slave_poll_controller,
#endif
	.ndo_setup_tc		= dsa_slave_setup_tc,
	.ndo_get_stats64	= dsa_slave_get_stats64,
	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
	.ndo_change_mtu		= dsa_slave_change_mtu,
	.ndo_fill_forward_path	= dsa_slave_fill_forward_path,
};
2303 
/* Device type assigned to user ports, exposed in sysfs and used by
 * netdev_uses_dsa() style checks elsewhere in the stack.
 */
static struct device_type dsa_type = {
	.name	= "dsa",
};
2307 
2308 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
2309 {
2310 	const struct dsa_port *dp = dsa_to_port(ds, port);
2311 
2312 	if (dp->pl)
2313 		phylink_mac_change(dp->pl, up);
2314 }
2315 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
2316 
2317 static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
2318 					  struct phylink_link_state *state)
2319 {
2320 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
2321 	struct dsa_switch *ds = dp->ds;
2322 
2323 	/* No need to check that this operation is valid, the callback would
2324 	 * not be called if it was not.
2325 	 */
2326 	ds->ops->phylink_fixed_state(ds, dp->index, state);
2327 }
2328 
2329 /* slave device setup *******************************************************/
2330 static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr,
2331 				 u32 flags)
2332 {
2333 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2334 	struct dsa_switch *ds = dp->ds;
2335 
2336 	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
2337 	if (!slave_dev->phydev) {
2338 		netdev_err(slave_dev, "no phy at %d\n", addr);
2339 		return -ENODEV;
2340 	}
2341 
2342 	slave_dev->phydev->dev_flags |= flags;
2343 
2344 	return phylink_connect_phy(dp->pl, slave_dev->phydev);
2345 }
2346 
/* Create the phylink instance for a user port and connect it to its PHY or
 * SFP, falling back to the switch-internal MDIO bus when the device tree
 * does not describe one. Destroys the phylink instance again on failure.
 */
static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	u32 phy_flags = 0;
	int ret;

	dp->pl_config.dev = &slave_dev->dev;
	dp->pl_config.type = PHYLINK_NETDEV;

	/* The get_fixed_state callback takes precedence over polling the
	 * link GPIO in PHYLINK (see phylink_get_fixed_state).  Only set
	 * this if the switch provides such a callback.
	 */
	if (ds->ops->phylink_fixed_state) {
		dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
		dp->pl_config.poll_fixed_state = true;
	}

	ret = dsa_port_phylink_create(dp);
	if (ret)
		return ret;

	/* Let the driver supply PHY quirk flags (ends up in dev_flags) */
	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
	if (ret == -ENODEV && ds->slave_mii_bus) {
		/* We could not connect to a designated PHY or SFP, so try to
		 * use the switch internal MDIO bus instead
		 */
		ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
	}
	if (ret) {
		netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
			   ERR_PTR(ret));
		/* Undo dsa_port_phylink_create() so the caller sees a clean
		 * failure
		 */
		dsa_port_phylink_destroy(dp);
	}

	return ret;
}
2389 
2390 void dsa_slave_setup_tagger(struct net_device *slave)
2391 {
2392 	struct dsa_port *dp = dsa_slave_to_port(slave);
2393 	struct net_device *master = dsa_port_to_master(dp);
2394 	struct dsa_slave_priv *p = netdev_priv(slave);
2395 	const struct dsa_port *cpu_dp = dp->cpu_dp;
2396 	const struct dsa_switch *ds = dp->ds;
2397 
2398 	slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
2399 	slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
2400 	/* Try to save one extra realloc later in the TX path (in the master)
2401 	 * by also inheriting the master's needed headroom and tailroom.
2402 	 * The 8021q driver also does this.
2403 	 */
2404 	slave->needed_headroom += master->needed_headroom;
2405 	slave->needed_tailroom += master->needed_tailroom;
2406 
2407 	p->xmit = cpu_dp->tag_ops->xmit;
2408 
2409 	slave->features = master->vlan_features | NETIF_F_HW_TC;
2410 	slave->hw_features |= NETIF_F_HW_TC;
2411 	slave->features |= NETIF_F_LLTX;
2412 	if (slave->needed_tailroom)
2413 		slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
2414 	if (ds->needs_standalone_vlan_filtering)
2415 		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2416 }
2417 
2418 int dsa_slave_suspend(struct net_device *slave_dev)
2419 {
2420 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2421 
2422 	if (!netif_running(slave_dev))
2423 		return 0;
2424 
2425 	netif_device_detach(slave_dev);
2426 
2427 	rtnl_lock();
2428 	phylink_stop(dp->pl);
2429 	rtnl_unlock();
2430 
2431 	return 0;
2432 }
2433 
2434 int dsa_slave_resume(struct net_device *slave_dev)
2435 {
2436 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2437 
2438 	if (!netif_running(slave_dev))
2439 		return 0;
2440 
2441 	netif_device_attach(slave_dev);
2442 
2443 	rtnl_lock();
2444 	phylink_start(dp->pl);
2445 	rtnl_unlock();
2446 
2447 	return 0;
2448 }
2449 
/* Allocate, configure and register the user-visible netdevice for @port.
 * On failure, everything set up so far is unwound via the goto ladder at
 * the bottom, in reverse order of acquisition.
 */
int dsa_slave_create(struct dsa_port *port)
{
	struct net_device *master = dsa_port_to_master(port);
	struct dsa_switch *ds = port->ds;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	const char *name;
	int assign_type;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	/* Use the device-tree provided name when available, otherwise fall
	 * back to kernel-enumerated eth%d naming.
	 */
	if (port->name) {
		name = port->name;
		assign_type = NET_NAME_PREDICTABLE;
	} else {
		name = "eth%d";
		assign_type = NET_NAME_ENUM;
	}

	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
				     assign_type, ether_setup,
				     ds->num_tx_queues, 1);
	if (slave_dev == NULL)
		return -ENOMEM;

	slave_dev->rtnl_link_ops = &dsa_link_ops;
	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
#if IS_ENABLED(CONFIG_DCB)
	slave_dev->dcbnl_ops = &dsa_slave_dcbnl_ops;
#endif
	/* Prefer a per-port MAC address; otherwise inherit the master's */
	if (!is_zero_ether_addr(port->mac))
		eth_hw_addr_set(slave_dev, port->mac);
	else
		eth_hw_addr_inherit(slave_dev, master);
	slave_dev->priv_flags |= IFF_NO_QUEUE;
	if (dsa_switch_supports_uc_filtering(ds))
		slave_dev->priv_flags |= IFF_UNICAST_FLT;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
	if (ds->ops->port_max_mtu)
		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

	SET_NETDEV_DEV(slave_dev, port->ds->dev);
	SET_NETDEV_DEVLINK_PORT(slave_dev, &port->devlink_port);
	slave_dev->dev.of_node = port->dn;
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!slave_dev->tstats) {
		free_netdev(slave_dev);
		return -ENOMEM;
	}

	ret = gro_cells_init(&p->gcells, slave_dev);
	if (ret)
		goto out_free;

	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	port->slave = slave_dev;
	dsa_slave_setup_tagger(slave_dev);

	netif_carrier_off(slave_dev);

	ret = dsa_slave_phy_setup(slave_dev);
	if (ret) {
		netdev_err(slave_dev,
			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
			   ret, ds->dst->index, ds->index, port->index);
		goto out_gcells;
	}

	rtnl_lock();

	/* MTU setup failure is deliberately nonfatal */
	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
	if (ret && ret != -EOPNOTSUPP)
		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
			 ret, ETH_DATA_LEN, port->index);

	ret = register_netdevice(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		rtnl_unlock();
		goto out_phy;
	}

	if (IS_ENABLED(CONFIG_DCB)) {
		ret = dsa_slave_dcbnl_init(slave_dev);
		if (ret) {
			netdev_err(slave_dev,
				   "failed to initialize DCB: %pe\n",
				   ERR_PTR(ret));
			rtnl_unlock();
			goto out_unregister;
		}
	}

	/* Make the slave an upper of the master so userspace can see the
	 * relationship
	 */
	ret = netdev_upper_dev_link(master, slave_dev, NULL);

	rtnl_unlock();

	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	unregister_netdev(slave_dev);
out_phy:
	rtnl_lock();
	phylink_disconnect_phy(p->dp->pl);
	rtnl_unlock();
	dsa_port_phylink_destroy(p->dp);
out_gcells:
	gro_cells_destroy(&p->gcells);
out_free:
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
	port->slave = NULL;
	return ret;
}
2575 
/* Tear down a user port netdevice created by dsa_slave_create(), undoing
 * its setup steps in reverse order.
 */
void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct net_device *master = dsa_slave_to_master(slave_dev);
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);

	netif_carrier_off(slave_dev);
	/* Unlink from the master, unregister and disconnect the PHY under a
	 * single RTNL critical section
	 */
	rtnl_lock();
	netdev_upper_dev_unlink(master, slave_dev);
	unregister_netdevice(slave_dev);
	phylink_disconnect_phy(dp->pl);
	rtnl_unlock();

	dsa_port_phylink_destroy(dp);
	gro_cells_destroy(&p->gcells);
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
}
2594 
/* Live-migrate user port @dev from its current DSA master to @master
 * (which may be a LAG of masters). The upper/lower linkage is rolled back
 * if the hardware refuses the change.
 */
int dsa_slave_change_master(struct net_device *dev, struct net_device *master,
			    struct netlink_ext_ack *extack)
{
	struct net_device *old_master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct net_device *upper;
	struct list_head *iter;
	int err;

	if (master == old_master)
		return 0;

	if (!ds->ops->port_change_master) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Driver does not support changing DSA master");
		return -EOPNOTSUPP;
	}

	if (!netdev_uses_dsa(master)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Interface not eligible as DSA master");
		return -EOPNOTSUPP;
	}

	/* Only DSA switch uppers and bridges on the new master are
	 * understood; refuse anything else.
	 */
	netdev_for_each_upper_dev_rcu(master, upper, iter) {
		if (dsa_slave_dev_check(upper))
			continue;
		if (netif_is_bridge_master(upper))
			continue;
		NL_SET_ERR_MSG_MOD(extack, "Cannot join master with unknown uppers");
		return -EOPNOTSUPP;
	}

	/* Since we allow live-changing the DSA master, plus we auto-open the
	 * DSA master when the user port opens => we need to ensure that the
	 * new DSA master is open too.
	 */
	if (dev->flags & IFF_UP) {
		err = dev_open(master, extack);
		if (err)
			return err;
	}

	netdev_upper_dev_unlink(old_master, dev);

	err = netdev_upper_dev_link(master, dev, extack);
	if (err)
		goto out_revert_old_master_unlink;

	err = dsa_port_change_master(dp, master, extack);
	if (err)
		goto out_revert_master_link;

	/* Update the MTU of the new CPU port through cross-chip notifiers */
	err = dsa_slave_change_mtu(dev, dev->mtu);
	if (err && err != -EOPNOTSUPP) {
		netdev_warn(dev,
			    "nonfatal error updating MTU with new master: %pe\n",
			    ERR_PTR(err));
	}

	/* If the port doesn't have its own MAC address and relies on the DSA
	 * master's one, inherit it again from the new DSA master.
	 */
	if (is_zero_ether_addr(dp->mac))
		eth_hw_addr_inherit(dev, master);

	return 0;

out_revert_master_link:
	netdev_upper_dev_unlink(master, dev);
out_revert_old_master_unlink:
	netdev_upper_dev_link(old_master, dev, NULL);
	return err;
}
2671 
2672 bool dsa_slave_dev_check(const struct net_device *dev)
2673 {
2674 	return dev->netdev_ops == &dsa_slave_netdev_ops;
2675 }
2676 EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
2677 
/* NETDEV_CHANGEUPPER handler for user ports: offload (or stop offloading)
 * bridge, LAG and HSR uppers. -EOPNOTSUPP from the driver is downgraded to
 * success with an extack hint, so unoffloaded software fallback still works.
 */
static int dsa_slave_changeupper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct netlink_ext_ack *extack;
	int err = NOTIFY_DONE;

	if (!dsa_slave_dev_check(dev))
		return err;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_bridge_join(dp, info->upper_dev, extack);
			if (!err)
				dsa_bridge_mtu_normalization(dp);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_WEAK_MOD(extack,
							"Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_bridge_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (netif_is_lag_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_lag_join(dp, info->upper_dev,
						info->upper_info, extack);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_WEAK_MOD(extack,
							"Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_lag_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (is_hsr_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_hsr_join(dp, info->upper_dev);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_WEAK_MOD(extack,
							"Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_hsr_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}
2736 
2737 static int dsa_slave_prechangeupper(struct net_device *dev,
2738 				    struct netdev_notifier_changeupper_info *info)
2739 {
2740 	struct dsa_port *dp = dsa_slave_to_port(dev);
2741 
2742 	if (!dsa_slave_dev_check(dev))
2743 		return NOTIFY_DONE;
2744 
2745 	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
2746 		dsa_port_pre_bridge_leave(dp, info->upper_dev);
2747 	else if (netif_is_lag_master(info->upper_dev) && !info->linking)
2748 		dsa_port_pre_lag_leave(dp, info->upper_dev);
2749 	/* dsa_port_pre_hsr_leave is not yet necessary since hsr cannot be
2750 	 * meaningfully enslaved to a bridge yet
2751 	 */
2752 
2753 	return NOTIFY_DONE;
2754 }
2755 
2756 static int
2757 dsa_slave_lag_changeupper(struct net_device *dev,
2758 			  struct netdev_notifier_changeupper_info *info)
2759 {
2760 	struct net_device *lower;
2761 	struct list_head *iter;
2762 	int err = NOTIFY_DONE;
2763 	struct dsa_port *dp;
2764 
2765 	if (!netif_is_lag_master(dev))
2766 		return err;
2767 
2768 	netdev_for_each_lower_dev(dev, lower, iter) {
2769 		if (!dsa_slave_dev_check(lower))
2770 			continue;
2771 
2772 		dp = dsa_slave_to_port(lower);
2773 		if (!dp->lag)
2774 			/* Software LAG */
2775 			continue;
2776 
2777 		err = dsa_slave_changeupper(lower, info);
2778 		if (notifier_to_errno(err))
2779 			break;
2780 	}
2781 
2782 	return err;
2783 }
2784 
2785 /* Same as dsa_slave_lag_changeupper() except that it calls
2786  * dsa_slave_prechangeupper()
2787  */
2788 static int
2789 dsa_slave_lag_prechangeupper(struct net_device *dev,
2790 			     struct netdev_notifier_changeupper_info *info)
2791 {
2792 	struct net_device *lower;
2793 	struct list_head *iter;
2794 	int err = NOTIFY_DONE;
2795 	struct dsa_port *dp;
2796 
2797 	if (!netif_is_lag_master(dev))
2798 		return err;
2799 
2800 	netdev_for_each_lower_dev(dev, lower, iter) {
2801 		if (!dsa_slave_dev_check(lower))
2802 			continue;
2803 
2804 		dp = dsa_slave_to_port(lower);
2805 		if (!dp->lag)
2806 			/* Software LAG */
2807 			continue;
2808 
2809 		err = dsa_slave_prechangeupper(lower, info);
2810 		if (notifier_to_errno(err))
2811 			break;
2812 	}
2813 
2814 	return err;
2815 }
2816 
2817 static int
2818 dsa_prevent_bridging_8021q_upper(struct net_device *dev,
2819 				 struct netdev_notifier_changeupper_info *info)
2820 {
2821 	struct netlink_ext_ack *ext_ack;
2822 	struct net_device *slave, *br;
2823 	struct dsa_port *dp;
2824 
2825 	ext_ack = netdev_notifier_info_to_extack(&info->info);
2826 
2827 	if (!is_vlan_dev(dev))
2828 		return NOTIFY_DONE;
2829 
2830 	slave = vlan_dev_real_dev(dev);
2831 	if (!dsa_slave_dev_check(slave))
2832 		return NOTIFY_DONE;
2833 
2834 	dp = dsa_slave_to_port(slave);
2835 	br = dsa_port_bridge_dev_get(dp);
2836 	if (!br)
2837 		return NOTIFY_DONE;
2838 
2839 	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
2840 	if (br_vlan_enabled(br) &&
2841 	    netif_is_bridge_master(info->upper_dev) && info->linking) {
2842 		NL_SET_ERR_MSG_MOD(ext_ack,
2843 				   "Cannot enslave VLAN device into VLAN aware bridge");
2844 		return notifier_from_errno(-EINVAL);
2845 	}
2846 
2847 	return NOTIFY_DONE;
2848 }
2849 
2850 static int
2851 dsa_slave_check_8021q_upper(struct net_device *dev,
2852 			    struct netdev_notifier_changeupper_info *info)
2853 {
2854 	struct dsa_port *dp = dsa_slave_to_port(dev);
2855 	struct net_device *br = dsa_port_bridge_dev_get(dp);
2856 	struct bridge_vlan_info br_info;
2857 	struct netlink_ext_ack *extack;
2858 	int err = NOTIFY_DONE;
2859 	u16 vid;
2860 
2861 	if (!br || !br_vlan_enabled(br))
2862 		return NOTIFY_DONE;
2863 
2864 	extack = netdev_notifier_info_to_extack(&info->info);
2865 	vid = vlan_dev_vlan_id(info->upper_dev);
2866 
2867 	/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
2868 	 * device, respectively the VID is not found, returning
2869 	 * 0 means success, which is a failure for us here.
2870 	 */
2871 	err = br_vlan_get_info(br, vid, &br_info);
2872 	if (err == 0) {
2873 		NL_SET_ERR_MSG_MOD(extack,
2874 				   "This VLAN is already configured by the bridge");
2875 		return notifier_from_errno(-EBUSY);
2876 	}
2877 
2878 	return NOTIFY_DONE;
2879 }
2880 
2881 static int
2882 dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
2883 				      struct netdev_notifier_changeupper_info *info)
2884 {
2885 	struct dsa_switch *ds;
2886 	struct dsa_port *dp;
2887 	int err;
2888 
2889 	if (!dsa_slave_dev_check(dev))
2890 		return dsa_prevent_bridging_8021q_upper(dev, info);
2891 
2892 	dp = dsa_slave_to_port(dev);
2893 	ds = dp->ds;
2894 
2895 	if (ds->ops->port_prechangeupper) {
2896 		err = ds->ops->port_prechangeupper(ds, dp->index, info);
2897 		if (err)
2898 			return notifier_from_errno(err);
2899 	}
2900 
2901 	if (is_vlan_dev(info->upper_dev))
2902 		return dsa_slave_check_8021q_upper(dev, info);
2903 
2904 	return NOTIFY_DONE;
2905 }
2906 
/* To be eligible as a DSA master, a LAG must have all lower interfaces be
 * eligible DSA masters. Additionally, all LAG slaves must be DSA masters of
 * switches in the same switch tree.
 */
static int dsa_lag_master_validate(struct net_device *lag_dev,
				   struct netlink_ext_ack *extack)
{
	struct net_device *lower1, *lower2;
	struct list_head *iter1, *iter2;

	/* Pairwise check of all LAG lowers (including each against itself,
	 * which covers the single-member case)
	 */
	netdev_for_each_lower_dev(lag_dev, lower1, iter1) {
		netdev_for_each_lower_dev(lag_dev, lower2, iter2) {
			if (!netdev_uses_dsa(lower1) ||
			    !netdev_uses_dsa(lower2)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "All LAG ports must be eligible as DSA masters");
				return notifier_from_errno(-EINVAL);
			}

			if (lower1 == lower2)
				continue;

			/* Distinct members must serve the same switch tree */
			if (!dsa_port_tree_same(lower1->dsa_ptr,
						lower2->dsa_ptr)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "LAG contains DSA masters of disjoint switch trees");
				return notifier_from_errno(-EINVAL);
			}
		}
	}

	return NOTIFY_DONE;
}
2940 
/* Restrict which upper devices a DSA master may be linked under: DSA switch
 * uppers, bridges and (validated) LAGs are allowed, anything else is denied.
 */
static int
dsa_master_prechangeupper_sanity_check(struct net_device *master,
				       struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);

	if (!netdev_uses_dsa(master))
		return NOTIFY_DONE;

	/* Unlinking is always permitted */
	if (!info->linking)
		return NOTIFY_DONE;

	/* Allow DSA switch uppers */
	if (dsa_slave_dev_check(info->upper_dev))
		return NOTIFY_DONE;

	/* Allow bridge uppers of DSA masters, subject to further
	 * restrictions in dsa_bridge_prechangelower_sanity_check()
	 */
	if (netif_is_bridge_master(info->upper_dev))
		return NOTIFY_DONE;

	/* Allow LAG uppers, subject to further restrictions in
	 * dsa_lag_master_prechangelower_sanity_check()
	 */
	if (netif_is_lag_master(info->upper_dev))
		return dsa_lag_master_validate(info->upper_dev, extack);

	NL_SET_ERR_MSG_MOD(extack,
			   "DSA master cannot join unknown upper interfaces");
	return notifier_from_errno(-EBUSY);
}
2973 
/* When a new lower joins a LAG that is already a DSA master, verify that it
 * is itself a DSA master of the same switch tree as the existing members.
 */
static int
dsa_lag_master_prechangelower_sanity_check(struct net_device *dev,
					   struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
	struct net_device *lag_dev = info->upper_dev;
	struct net_device *lower;
	struct list_head *iter;

	if (!netdev_uses_dsa(lag_dev) || !netif_is_lag_master(lag_dev))
		return NOTIFY_DONE;

	if (!info->linking)
		return NOTIFY_DONE;

	if (!netdev_uses_dsa(dev)) {
		NL_SET_ERR_MSG(extack,
			       "Only DSA masters can join a LAG DSA master");
		return notifier_from_errno(-EINVAL);
	}

	/* Only the first existing lower is compared against - note the
	 * unconditional break; presumably sufficient because the existing
	 * members were already validated against each other when they
	 * joined.
	 */
	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) {
			NL_SET_ERR_MSG(extack,
				       "Interface is DSA master for a different switch tree than this LAG");
			return notifier_from_errno(-EINVAL);
		}

		break;
	}

	return NOTIFY_DONE;
}
3007 
/* Don't allow bridging of DSA masters, since the bridge layer rx_handler
 * prevents the DSA fake ethertype handler to be invoked, so we don't get the
 * chance to strip off and parse the DSA switch tag protocol header (the bridge
 * layer just returns RX_HANDLER_CONSUMED, stopping RX processing for these
 * frames).
 * The only case where that would not be an issue is when bridging can already
 * be offloaded, such as when the DSA master is itself a DSA or plain switchdev
 * port, and is bridged only with other ports from the same hardware device.
 */
static int
dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
				       struct netdev_notifier_changeupper_info *info)
{
	struct net_device *br = info->upper_dev;
	struct netlink_ext_ack *extack;
	struct net_device *lower;
	struct list_head *iter;

	if (!netif_is_bridge_master(br))
		return NOTIFY_DONE;

	if (!info->linking)
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* Compare the joining port against every existing bridge port; only
	 * pairs where at least one side is a DSA master are constrained.
	 */
	netdev_for_each_lower_dev(br, lower, iter) {
		if (!netdev_uses_dsa(new_lower) && !netdev_uses_dsa(lower))
			continue;

		/* Same-ASIC pairs can offload the bridging, so only
		 * cross-device (software) bridging is denied.
		 */
		if (!netdev_port_same_parent_id(lower, new_lower)) {
			NL_SET_ERR_MSG(extack,
				       "Cannot do software bridging with a DSA master");
			return notifier_from_errno(-EINVAL);
		}
	}

	return NOTIFY_DONE;
}
3047 
3048 static void dsa_tree_migrate_ports_from_lag_master(struct dsa_switch_tree *dst,
3049 						   struct net_device *lag_dev)
3050 {
3051 	struct net_device *new_master = dsa_tree_find_first_master(dst);
3052 	struct dsa_port *dp;
3053 	int err;
3054 
3055 	dsa_tree_for_each_user_port(dp, dst) {
3056 		if (dsa_port_to_master(dp) != lag_dev)
3057 			continue;
3058 
3059 		err = dsa_slave_change_master(dp->slave, new_master, NULL);
3060 		if (err) {
3061 			netdev_err(dp->slave,
3062 				   "failed to restore master to %s: %pe\n",
3063 				   new_master->name, ERR_PTR(err));
3064 		}
3065 	}
3066 }
3067 
/* A DSA master joined a LAG: set up the hardware LAG on the CPU port, then
 * migrate all user ports served by @master over to @lag_dev. On failure,
 * already-migrated ports are walked back in reverse and the hardware LAG is
 * torn down again.
 */
static int dsa_master_lag_join(struct net_device *master,
			       struct net_device *lag_dev,
			       struct netdev_lag_upper_info *uinfo,
			       struct netlink_ext_ack *extack)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct dsa_port *dp;
	int err;

	err = dsa_master_lag_setup(lag_dev, cpu_dp, uinfo, extack);
	if (err)
		return err;

	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_master(dp) != master)
			continue;

		err = dsa_slave_change_master(dp->slave, lag_dev, extack);
		if (err)
			goto restore;
	}

	return 0;

restore:
	/* Walk back only the ports already migrated, in reverse order */
	dsa_tree_for_each_user_port_continue_reverse(dp, dst) {
		if (dsa_port_to_master(dp) != lag_dev)
			continue;

		err = dsa_slave_change_master(dp->slave, master, NULL);
		if (err) {
			netdev_err(dp->slave,
				   "failed to restore master to %s: %pe\n",
				   master->name, ERR_PTR(err));
		}
	}

	dsa_master_lag_teardown(lag_dev, master->dsa_ptr);

	return err;
}
3110 
/* A DSA master left its LAG: repoint the affected user ports at a remaining
 * LAG member if there is one, otherwise migrate them back to a physical
 * master, then tear down the CPU port's hardware LAG membership.
 */
static void dsa_master_lag_leave(struct net_device *master,
				 struct net_device *lag_dev)
{
	struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct dsa_port *new_cpu_dp = NULL;
	struct net_device *lower;
	struct list_head *iter;

	/* Find any remaining DSA master among the LAG's lowers */
	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (netdev_uses_dsa(lower)) {
			new_cpu_dp = lower->dsa_ptr;
			break;
		}
	}

	if (new_cpu_dp) {
		/* Update the CPU port of the user ports still under the LAG
		 * so that dsa_port_to_master() continues to work properly
		 */
		dsa_tree_for_each_user_port(dp, dst)
			if (dsa_port_to_master(dp) == lag_dev)
				dp->cpu_dp = new_cpu_dp;

		/* Update the index of the virtual CPU port to match the lowest
		 * physical CPU port
		 */
		lag_dev->dsa_ptr = new_cpu_dp;
		/* Publish the new dsa_ptr before readers can observe it;
		 * presumably paired with lockless dereferences of dsa_ptr in
		 * the RX path - TODO confirm the pairing read barrier
		 */
		wmb();
	} else {
		/* If the LAG DSA master has no ports left, migrate back all
		 * user ports to the first physical CPU port
		 */
		dsa_tree_migrate_ports_from_lag_master(dst, lag_dev);
	}

	/* This DSA master has left its LAG in any case, so let
	 * the CPU port leave the hardware LAG as well
	 */
	dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
}
3152 
3153 static int dsa_master_changeupper(struct net_device *dev,
3154 				  struct netdev_notifier_changeupper_info *info)
3155 {
3156 	struct netlink_ext_ack *extack;
3157 	int err = NOTIFY_DONE;
3158 
3159 	if (!netdev_uses_dsa(dev))
3160 		return err;
3161 
3162 	extack = netdev_notifier_info_to_extack(&info->info);
3163 
3164 	if (netif_is_lag_master(info->upper_dev)) {
3165 		if (info->linking) {
3166 			err = dsa_master_lag_join(dev, info->upper_dev,
3167 						  info->upper_info, extack);
3168 			err = notifier_from_errno(err);
3169 		} else {
3170 			dsa_master_lag_leave(dev, info->upper_dev);
3171 			err = NOTIFY_OK;
3172 		}
3173 	}
3174 
3175 	return err;
3176 }
3177 
/* NETDEV_* notifier for both DSA user ports and DSA masters: vetoes
 * invalid topology changes, mirrors LAG/administrative state onto the
 * hardware CPU ports, and closes user ports when their master goes down.
 */
static int dsa_slave_netdevice_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_PRECHANGEUPPER: {
		struct netdev_notifier_changeupper_info *info = ptr;
		int err;

		/* Run all sanity checks first so an invalid adjacency is
		 * refused before the core commits to it.
		 */
		err = dsa_slave_prechangeupper_sanity_check(dev, info);
		if (notifier_to_errno(err))
			return err;

		err = dsa_master_prechangeupper_sanity_check(dev, info);
		if (notifier_to_errno(err))
			return err;

		err = dsa_lag_master_prechangelower_sanity_check(dev, info);
		if (notifier_to_errno(err))
			return err;

		err = dsa_bridge_prechangelower_sanity_check(dev, info);
		if (notifier_to_errno(err))
			return err;

		err = dsa_slave_prechangeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		err = dsa_slave_lag_prechangeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		break;
	}
	case NETDEV_CHANGEUPPER: {
		int err;

		err = dsa_slave_changeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		err = dsa_slave_lag_changeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		err = dsa_master_changeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		break;
	}
	case NETDEV_CHANGELOWERSTATE: {
		struct netdev_notifier_changelowerstate_info *info = ptr;
		struct dsa_port *dp;
		int err = 0;

		if (dsa_slave_dev_check(dev)) {
			dp = dsa_slave_to_port(dev);

			err = dsa_port_lag_change(dp, info->lower_state_info);
		}

		/* Mirror LAG port events on DSA masters that are in
		 * a LAG towards their respective switch CPU ports
		 */
		if (netdev_uses_dsa(dev)) {
			dp = dev->dsa_ptr;

			err = dsa_port_lag_change(dp, info->lower_state_info);
		}

		return notifier_from_errno(err);
	}
	case NETDEV_CHANGE:
	case NETDEV_UP: {
		/* Track state of master port.
		 * DSA driver may require the master port (and indirectly
		 * the tagger) to be available for some special operation.
		 */
		if (netdev_uses_dsa(dev)) {
			struct dsa_port *cpu_dp = dev->dsa_ptr;
			struct dsa_switch_tree *dst = cpu_dp->ds->dst;

			/* Track when the master port is UP */
			dsa_tree_master_oper_state_change(dst, dev,
							  netif_oper_up(dev));

			/* Track when the master port is ready and can accept
			 * packet.
			 * NETDEV_UP event is not enough to flag a port as ready.
			 * We also have to wait for linkwatch_do_dev to dev_activate
			 * and emit a NETDEV_CHANGE event.
			 * We check if a master port is ready by checking if the dev
			 * have a qdisc assigned and is not noop.
			 */
			dsa_tree_master_admin_state_change(dst, dev,
							   !qdisc_tx_is_noop(dev));

			return NOTIFY_OK;
		}

		return NOTIFY_DONE;
	}
	case NETDEV_GOING_DOWN: {
		struct dsa_port *dp, *cpu_dp;
		struct dsa_switch_tree *dst;
		LIST_HEAD(close_list);

		if (!netdev_uses_dsa(dev))
			return NOTIFY_DONE;

		cpu_dp = dev->dsa_ptr;
		dst = cpu_dp->ds->dst;

		dsa_tree_master_admin_state_change(dst, dev, false);

		/* Collect every user port served by this CPU port and close
		 * them along with the master that is going down.
		 */
		list_for_each_entry(dp, &dst->ports, list) {
			if (!dsa_port_is_user(dp))
				continue;

			if (dp->cpu_dp != cpu_dp)
				continue;

			list_add(&dp->slave->close_list, &close_list);
		}

		dev_close_many(&close_list, true);

		return NOTIFY_OK;
	}
	default:
		break;
	}

	return NOTIFY_DONE;
}
3316 
3317 static void
3318 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
3319 {
3320 	struct switchdev_notifier_fdb_info info = {};
3321 
3322 	info.addr = switchdev_work->addr;
3323 	info.vid = switchdev_work->vid;
3324 	info.offloaded = true;
3325 	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
3326 				 switchdev_work->orig_dev, &info.info, NULL);
3327 }
3328 
3329 static void dsa_slave_switchdev_event_work(struct work_struct *work)
3330 {
3331 	struct dsa_switchdev_event_work *switchdev_work =
3332 		container_of(work, struct dsa_switchdev_event_work, work);
3333 	const unsigned char *addr = switchdev_work->addr;
3334 	struct net_device *dev = switchdev_work->dev;
3335 	u16 vid = switchdev_work->vid;
3336 	struct dsa_switch *ds;
3337 	struct dsa_port *dp;
3338 	int err;
3339 
3340 	dp = dsa_slave_to_port(dev);
3341 	ds = dp->ds;
3342 
3343 	switch (switchdev_work->event) {
3344 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
3345 		if (switchdev_work->host_addr)
3346 			err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
3347 		else if (dp->lag)
3348 			err = dsa_port_lag_fdb_add(dp, addr, vid);
3349 		else
3350 			err = dsa_port_fdb_add(dp, addr, vid);
3351 		if (err) {
3352 			dev_err(ds->dev,
3353 				"port %d failed to add %pM vid %d to fdb: %d\n",
3354 				dp->index, addr, vid, err);
3355 			break;
3356 		}
3357 		dsa_fdb_offload_notify(switchdev_work);
3358 		break;
3359 
3360 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
3361 		if (switchdev_work->host_addr)
3362 			err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
3363 		else if (dp->lag)
3364 			err = dsa_port_lag_fdb_del(dp, addr, vid);
3365 		else
3366 			err = dsa_port_fdb_del(dp, addr, vid);
3367 		if (err) {
3368 			dev_err(ds->dev,
3369 				"port %d failed to delete %pM vid %d from fdb: %d\n",
3370 				dp->index, addr, vid, err);
3371 		}
3372 
3373 		break;
3374 	}
3375 
3376 	kfree(switchdev_work);
3377 }
3378 
3379 static bool dsa_foreign_dev_check(const struct net_device *dev,
3380 				  const struct net_device *foreign_dev)
3381 {
3382 	const struct dsa_port *dp = dsa_slave_to_port(dev);
3383 	struct dsa_switch_tree *dst = dp->ds->dst;
3384 
3385 	if (netif_is_bridge_master(foreign_dev))
3386 		return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);
3387 
3388 	if (netif_is_bridge_port(foreign_dev))
3389 		return !dsa_tree_offloads_bridge_port(dst, foreign_dev);
3390 
3391 	/* Everything else is foreign */
3392 	return true;
3393 }
3394 
/* Filter an FDB add/del event for this user port and, when relevant,
 * defer the hardware programming via dsa_schedule_work(). Runs in
 * atomic context, hence GFP_ATOMIC. Returns 0 when handled or ignored,
 * negative errno otherwise.
 */
static int dsa_slave_fdb_event(struct net_device *dev,
			       struct net_device *orig_dev,
			       unsigned long event, const void *ctx,
			       const struct switchdev_notifier_fdb_info *fdb_info)
{
	struct dsa_switchdev_event_work *switchdev_work;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	bool host_addr = fdb_info->is_local;
	struct dsa_switch *ds = dp->ds;

	/* If a context is provided, only act on events targeting this port */
	if (ctx && ctx != dp)
		return 0;

	/* Standalone (non-bridged) ports do not offload FDB entries */
	if (!dp->bridge)
		return 0;

	if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
		/* Nothing to do for entries learned on a bridge port that
		 * our own hardware offloads
		 */
		if (dsa_port_offloads_bridge_port(dp, orig_dev))
			return 0;

		/* FDB entries learned by the software bridge or by foreign
		 * bridge ports should be installed as host addresses only if
		 * the driver requests assisted learning.
		 */
		if (!ds->assisted_learning_on_cpu_port)
			return 0;
	}

	/* Also treat FDB entries on foreign interfaces bridged with us as host
	 * addresses.
	 */
	if (dsa_foreign_dev_check(dev, orig_dev))
		host_addr = true;

	/* Check early that we're not doing work in vain.
	 * Host addresses on LAG ports still require regular FDB ops,
	 * since the CPU port isn't in a LAG.
	 */
	if (dp->lag && !host_addr) {
		if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
			return -EOPNOTSUPP;
	} else {
		if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
			return -EOPNOTSUPP;
	}

	/* Atomic context: cannot sleep, so no GFP_KERNEL allocation here */
	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return -ENOMEM;

	netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n",
		   event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting",
		   orig_dev->name, fdb_info->addr, fdb_info->vid,
		   host_addr ? " as host address" : "");

	INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work);
	switchdev_work->event = event;
	switchdev_work->dev = dev;
	switchdev_work->orig_dev = orig_dev;

	ether_addr_copy(switchdev_work->addr, fdb_info->addr);
	switchdev_work->vid = fdb_info->vid;
	switchdev_work->host_addr = host_addr;

	dsa_schedule_work(&switchdev_work->work);

	return 0;
}
3463 
3464 /* Called under rcu_read_lock() */
3465 static int dsa_slave_switchdev_event(struct notifier_block *unused,
3466 				     unsigned long event, void *ptr)
3467 {
3468 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3469 	int err;
3470 
3471 	switch (event) {
3472 	case SWITCHDEV_PORT_ATTR_SET:
3473 		err = switchdev_handle_port_attr_set(dev, ptr,
3474 						     dsa_slave_dev_check,
3475 						     dsa_slave_port_attr_set);
3476 		return notifier_from_errno(err);
3477 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
3478 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
3479 		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
3480 							   dsa_slave_dev_check,
3481 							   dsa_foreign_dev_check,
3482 							   dsa_slave_fdb_event);
3483 		return notifier_from_errno(err);
3484 	default:
3485 		return NOTIFY_DONE;
3486 	}
3487 
3488 	return NOTIFY_OK;
3489 }
3490 
3491 static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
3492 					      unsigned long event, void *ptr)
3493 {
3494 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3495 	int err;
3496 
3497 	switch (event) {
3498 	case SWITCHDEV_PORT_OBJ_ADD:
3499 		err = switchdev_handle_port_obj_add_foreign(dev, ptr,
3500 							    dsa_slave_dev_check,
3501 							    dsa_foreign_dev_check,
3502 							    dsa_slave_port_obj_add);
3503 		return notifier_from_errno(err);
3504 	case SWITCHDEV_PORT_OBJ_DEL:
3505 		err = switchdev_handle_port_obj_del_foreign(dev, ptr,
3506 							    dsa_slave_dev_check,
3507 							    dsa_foreign_dev_check,
3508 							    dsa_slave_port_obj_del);
3509 		return notifier_from_errno(err);
3510 	case SWITCHDEV_PORT_ATTR_SET:
3511 		err = switchdev_handle_port_attr_set(dev, ptr,
3512 						     dsa_slave_dev_check,
3513 						     dsa_slave_port_attr_set);
3514 		return notifier_from_errno(err);
3515 	}
3516 
3517 	return NOTIFY_DONE;
3518 }
3519 
/* Handles NETDEV_* events for DSA user ports and masters */
static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call  = dsa_slave_netdevice_event,
};

/* Atomic switchdev notifier: FDB add/del and port attribute events */
struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call = dsa_slave_switchdev_event,
};

/* Blocking switchdev notifier: port object add/del and attribute events */
struct notifier_block dsa_slave_switchdev_blocking_notifier = {
	.notifier_call = dsa_slave_switchdev_blocking_event,
};
3531 
3532 int dsa_slave_register_notifier(void)
3533 {
3534 	struct notifier_block *nb;
3535 	int err;
3536 
3537 	err = register_netdevice_notifier(&dsa_slave_nb);
3538 	if (err)
3539 		return err;
3540 
3541 	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
3542 	if (err)
3543 		goto err_switchdev_nb;
3544 
3545 	nb = &dsa_slave_switchdev_blocking_notifier;
3546 	err = register_switchdev_blocking_notifier(nb);
3547 	if (err)
3548 		goto err_switchdev_blocking_nb;
3549 
3550 	return 0;
3551 
3552 err_switchdev_blocking_nb:
3553 	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
3554 err_switchdev_nb:
3555 	unregister_netdevice_notifier(&dsa_slave_nb);
3556 	return err;
3557 }
3558 
3559 void dsa_slave_unregister_notifier(void)
3560 {
3561 	struct notifier_block *nb;
3562 	int err;
3563 
3564 	nb = &dsa_slave_switchdev_blocking_notifier;
3565 	err = unregister_switchdev_blocking_notifier(nb);
3566 	if (err)
3567 		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
3568 
3569 	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
3570 	if (err)
3571 		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
3572 
3573 	err = unregister_netdevice_notifier(&dsa_slave_nb);
3574 	if (err)
3575 		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
3576 }
3577