// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/selftests.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <net/dcbnl.h>
#include <linux/netpoll.h>

#include "dsa.h"
#include "port.h"
#include "master.h"
#include "netlink.h"
#include "slave.h"
#include "tag.h"

struct dsa_switchdev_event_work {
	struct net_device *dev;
	struct net_device *orig_dev;
	struct work_struct work;
	unsigned long event;
	/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
	 * SWITCHDEV_FDB_DEL_TO_DEVICE
	 */
	unsigned char addr[ETH_ALEN];
	u16 vid;
	bool host_addr;
};

enum dsa_standalone_event {
	DSA_UC_ADD,
	DSA_UC_DEL,
	DSA_MC_ADD,
	DSA_MC_DEL,
};

struct dsa_standalone_event_work {
	struct work_struct work;
	struct net_device *dev;
	enum dsa_standalone_event event;
	unsigned char addr[ETH_ALEN];
	u16 vid;
};

static bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
{
	return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
	       ds->fdb_isolation && !ds->vlan_filtering_is_global &&
	       !ds->needs_standalone_vlan_filtering;
}

static bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds)
{
	return ds->ops->port_mdb_add && ds->ops->port_mdb_del &&
	       ds->fdb_isolation && !ds->vlan_filtering_is_global &&
	       !ds->needs_standalone_vlan_filtering;
}
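
/* Standalone host address filtering is only offloaded when the switch can
 * program FDB/MDB entries, keeps them isolated from the bridging database
 * (ds->fdb_isolation), and can keep standalone ports VLAN-unaware. When
 * these helpers return false, the addresses are only synced to the DSA
 * master (see dsa_slave_sync_uc() below) and filtering is left to it.
 */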

static void dsa_slave_standalone_event_work(struct work_struct *work)
{
	struct dsa_standalone_event_work *standalone_work =
		container_of(work, struct dsa_standalone_event_work, work);
	const unsigned char *addr = standalone_work->addr;
	struct net_device *dev = standalone_work->dev;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_mdb mdb;
	struct dsa_switch *ds = dp->ds;
	u16 vid = standalone_work->vid;
	int err;

	switch (standalone_work->event) {
	case DSA_UC_ADD:
		err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		break;

	case DSA_UC_DEL:
		err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	case DSA_MC_ADD:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_add(dp, &mdb);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to mdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		break;
	case DSA_MC_DEL:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_del(dp, &mdb);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from mdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	}

	kfree(standalone_work);
}

static int dsa_slave_schedule_standalone_work(struct net_device *dev,
					      enum dsa_standalone_event event,
					      const unsigned char *addr,
					      u16 vid)
{
	struct dsa_standalone_event_work *standalone_work;

	standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
	if (!standalone_work)
		return -ENOMEM;

	INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
	standalone_work->event = event;
	standalone_work->dev = dev;

	ether_addr_copy(standalone_work->addr, addr);
	standalone_work->vid = vid;

	dsa_schedule_work(&standalone_work->work);

	return 0;
}
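
/* The host address work is deferred rather than executed inline because
 * the sync/unsync callbacks run under the netdev address list spinlock
 * (see dsa_slave_sync_ha() below), where sleeping to program the hardware
 * is not allowed. Hence also the GFP_ATOMIC allocation above.
 */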

static int dsa_slave_sync_uc(struct net_device *dev,
			     const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dev_uc_add(master, addr);

	if (!dsa_switch_supports_uc_filtering(dp->ds))
		return 0;

	return dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
}

static int dsa_slave_unsync_uc(struct net_device *dev,
			       const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dev_uc_del(master, addr);

	if (!dsa_switch_supports_uc_filtering(dp->ds))
		return 0;

	return dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
}

static int dsa_slave_sync_mc(struct net_device *dev,
			     const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dev_mc_add(master, addr);

	if (!dsa_switch_supports_mc_filtering(dp->ds))
		return 0;

	return dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
}

static int dsa_slave_unsync_mc(struct net_device *dev,
			       const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dev_mc_del(master, addr);

	if (!dsa_switch_supports_mc_filtering(dp->ds))
		return 0;

	return dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
}

void dsa_slave_sync_ha(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(dev);

	netdev_for_each_synced_mc_addr(ha, dev)
		dsa_slave_sync_mc(dev, ha->addr);

	netdev_for_each_synced_uc_addr(ha, dev)
		dsa_slave_sync_uc(dev, ha->addr);

	netif_addr_unlock_bh(dev);

	if (dsa_switch_supports_uc_filtering(ds) ||
	    dsa_switch_supports_mc_filtering(ds))
		dsa_flush_workqueue();
}

void dsa_slave_unsync_ha(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(dev);

	netdev_for_each_synced_uc_addr(ha, dev)
		dsa_slave_unsync_uc(dev, ha->addr);

	netdev_for_each_synced_mc_addr(ha, dev)
		dsa_slave_unsync_mc(dev, ha->addr);

	netif_addr_unlock_bh(dev);

	if (dsa_switch_supports_uc_filtering(ds) ||
	    dsa_switch_supports_mc_filtering(ds))
		dsa_flush_workqueue();
}

/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}
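
/* A minimal sketch (assumption: core behavior as in dsa2.c, not shown in
 * this file) of how the above is consumed: when a driver implements
 * ds->ops->phy_read but does not register an MDIO bus of its own, the
 * core does roughly:
 *
 *	ds->slave_mii_bus = mdiobus_alloc();
 *	dsa_slave_mii_bus_init(ds);
 *	err = mdiobus_register(ds->slave_mii_bus);
 *
 * so the internal PHYs become visible on a bus named "dsa-<tree>.<switch>".
 */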


/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}

static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dev_open(master, NULL);
	if (err < 0) {
		netdev_err(dev, "failed to open master %s\n", master->name);
		goto out;
	}

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
		if (err)
			goto out;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto del_host_addr;
	}

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto del_unicast;

	return 0;

del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
del_host_addr:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
out:
	return err;
}

static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	dsa_port_disable_rt(dp);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

	return 0;
}

static void dsa_slave_manage_host_flood(struct net_device *dev)
{
	bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	bool uc = dev->flags & IFF_PROMISC;

	dsa_port_set_host_flood(dp, uc, mc);
}

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(master,
				 dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(master,
				    dev->flags & IFF_PROMISC ? 1 : -1);

	if (dsa_switch_supports_uc_filtering(ds) &&
	    dsa_switch_supports_mc_filtering(ds))
		dsa_slave_manage_host_flood(dev);
}

static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	__dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
	__dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
}

static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* If the port is down, the address isn't synced yet to hardware or
	 * to the DSA master, so there is nothing to change.
	 */
	if (!(dev->flags & IFF_UP))
		goto out_change_dev_addr;

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
		if (err)
			return err;
	}

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			goto del_unicast;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

out_change_dev_addr:
	eth_hw_addr_set(dev, addr->sa_data);

	return 0;

del_unicast:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);

	return err;
}
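
/* Note the make-before-break ordering above: the new address is added to
 * the host FDB and the master's RX filter before the old one is removed,
 * so the port does not drop traffic for either address mid-change.
 */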

struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}

static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
				   const struct switchdev_attr *attr,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	if (ctx && ctx != dp)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_state(dp, attr->u.stp_state, true);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MST_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MST:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_mst_enable(dp, attr->u.mst, extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_VLAN_MSTI:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/* Must be called under rcu_read_lock() */
static int
dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
		u16 vid;

		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		if (vid == vlan->vid)
			return -EBUSY;
	}

	return 0;
}

static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
	 * the same VID.
	 */
	if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
		rcu_read_lock();
		err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
		rcu_read_unlock();
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Port already has a VLAN upper with this VID");
			return err;
		}
	}

	return dsa_port_vlan_add(dp, vlan, extack);
}

/* Offload a VLAN installed on the bridge or on a foreign interface by
 * installing it as a VLAN towards the CPU port.
 */
static int dsa_slave_host_vlan_add(struct net_device *dev,
				   const struct switchdev_obj *obj,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	return dsa_port_host_vlan_add(dp, &vlan, extack);
}

static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_add(dev, obj, extack);
		else
			err = dsa_slave_host_vlan_add(dev, obj, extack);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_vlan_del(dp, vlan);
}

static int dsa_slave_host_vlan_del(struct net_device *dev,
				   const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_host_vlan_del(dp, vlan);
}

static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_del(dev, obj);
		else
			err = dsa_slave_host_vlan_del(dev, obj);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}

static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return;

	if (!ds->ops->port_txtstamp)
		return;

	ds->ops->port_txtstamp(ds, p->dp->index, skb);
}

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKBs for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
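
/* As an illustrative sketch (not taken from this file; tag layout and
 * names are made up), a tagging protocol driver's xmit hook typically
 * prepends its header into the headroom reserved via needed_headroom and
 * returns the skb, which dsa_slave_xmit() below then hands to
 * dsa_enqueue_skb():
 *
 *	static struct sk_buff *foo_tag_xmit(struct sk_buff *skb,
 *					    struct net_device *dev)
 *	{
 *		struct dsa_port *dp = dsa_slave_to_port(dev);
 *		u8 *tag = skb_push(skb, 2);	// headroom already ensured
 *
 *		tag[0] = 0x80 | dp->index;	// hypothetical tag format
 *		tag[1] = 0;
 *		return skb;
 *	}
 */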

static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = dev->needed_headroom;
	int needed_tailroom = dev->needed_tailroom;

	/* For tail taggers, we need to pad short frames ourselves, to ensure
	 * that the tail tag does not fail at its role of being at the end of
	 * the packet, once the master interface pads the frame. Account for
	 * that pad length here, and pad later.
	 */
	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
		needed_tailroom += ETH_ZLEN - skb->len;
	/* skb_headroom() returns unsigned int... */
	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);

	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
		/* No reallocation needed, yay! */
		return 0;

	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
				GFP_ATOMIC);
}
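
/* Worked example (illustrative numbers): with a 4-byte tail tag
 * (dev->needed_tailroom = 4) and a 42-byte ARP frame, the frame must
 * first be padded to ETH_ZLEN = 60 bytes, so needed_tailroom becomes
 * 4 + (60 - 42) = 22 bytes. Only if the skb lacks that much tailroom
 * (or is cloned) do we pay for pskb_expand_head().
 */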

static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	memset(skb->cb, 0, sizeof(skb->cb));

	/* Handle tx timestamp if any */
	dsa_skb_tx_timestamp(p, skb);

	if (dsa_realloc_skb(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * dsa_realloc_skb(), which has also ensured that padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}

/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static int dsa_slave_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	} else if (stringset == ETH_SS_TEST) {
		net_selftest_get_strings(data);
	}
}

static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(dev->tstats, i);
		do {
			start = u64_stats_fetch_begin(&s->syncp);
			tx_packets = u64_stats_read(&s->tx_packets);
			tx_bytes = u64_stats_read(&s->tx_bytes);
			rx_packets = u64_stats_read(&s->rx_packets);
			rx_bytes = u64_stats_read(&s->rx_bytes);
		} while (u64_stats_fetch_retry(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count = 0;

		if (ds->ops->get_sset_count) {
			count = ds->ops->get_sset_count(ds, dp->index, sset);
			if (count < 0)
				return count;
		}

		return count + 4;
	} else if (sset == ETH_SS_TEST) {
		return net_selftest_get_count();
	}

	return -EOPNOTSUPP;
}
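
/* The "+ 4" accounts for the four software-maintained counters
 * (tx_packets, tx_bytes, rx_packets, rx_bytes) that dsa_slave_get_strings()
 * and dsa_slave_get_ethtool_stats() always report ahead of the driver's
 * own hardware counters.
 */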

static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
					struct ethtool_eth_phy_stats *phy_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_phy_stats)
		ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
}

static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
					struct ethtool_eth_mac_stats *mac_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_mac_stats)
		ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
}

static void
dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
			     struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_ctrl_stats)
		ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
}

static void
dsa_slave_get_rmon_stats(struct net_device *dev,
			 struct ethtool_rmon_stats *rmon_stats,
			 const struct ethtool_rmon_hist_range **ranges)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_rmon_stats)
		ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
}

static void dsa_slave_net_selftest(struct net_device *ndev,
				   struct ethtool_test *etest, u64 *buf)
{
	struct dsa_port *dp = dsa_slave_to_port(ndev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->self_test) {
		ds->ops->self_test(ds, dp->index, etest, buf);
		return;
	}

	net_selftest(ndev, etest, buf);
}

static int dsa_slave_get_mm(struct net_device *dev,
			    struct ethtool_mm_state *state)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_mm)
		return -EOPNOTSUPP;

	return ds->ops->get_mm(ds, dp->index, state);
}

static int dsa_slave_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg,
			    struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_mm)
		return -EOPNOTSUPP;

	return ds->ops->set_mm(ds, dp->index, cfg, extack);
}

static void dsa_slave_get_mm_stats(struct net_device *dev,
				   struct ethtool_mm_stats *stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_mm_stats)
		ds->ops->get_mm_stats(ds, dp->index, stats);
}

static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}

static int dsa_slave_get_link_ksettings(struct net_device *dev,
					struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_slave_set_link_ksettings(struct net_device *dev,
					const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

static void dsa_slave_get_pause_stats(struct net_device *dev,
				  struct ethtool_pause_stats *pause_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_pause_stats)
		ds->ops->get_pause_stats(ds, dp->index, pause_stats);
}

static void dsa_slave_get_pauseparam(struct net_device *dev,
				     struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	phylink_ethtool_get_pauseparam(dp->pl, pause);
}

static int dsa_slave_set_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_set_pauseparam(dp->pl, pause);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif

static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_mirror_tc_entry *mirror;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err;

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	if (!dsa_slave_dev_check(act->dev))
		return -EOPNOTSUPP;

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
	mirror = &mall_tc_entry->mirror;

	to_dp = dsa_slave_to_port(act->dev);

	mirror->to_local_port = to_dp->index;
	mirror->ingress = ingress;

	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int
dsa_slave_add_cls_matchall_police(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_policer_tc_entry *policer;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	int err;

	if (!ds->ops->port_policer_add) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Policing offload not implemented");
		return -EOPNOTSUPP;
	}

	if (!ingress) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only supported on ingress qdisc");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one port policer allowed");
			return -EEXIST;
		}
	}

	act = &cls->rule->action.entries[0];

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
	policer = &mall_tc_entry->policer;
	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
	policer->burst = act->police.burst;

	err = ds->ops->port_policer_add(ds, dp->index, policer);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	int err = -EOPNOTSUPP;

	if (cls->common.protocol == htons(ETH_P_ALL) &&
	    flow_offload_has_one_action(&cls->rule->action) &&
	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
	else if (flow_offload_has_one_action(&cls->rule->action) &&
		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);

	return err;
}

static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		if (ds->ops->port_mirror_del)
			ds->ops->port_mirror_del(ds, dp->index,
						 &mall_tc_entry->mirror);
		break;
	case DSA_PORT_MALL_POLICER:
		if (ds->ops->port_policer_del)
			ds->ops->port_policer_del(ds, dp->index);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_add_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_add)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_add(ds, port, cls, ingress);
}

static int dsa_slave_del_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_del)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_del(ds, port, cls, ingress);
}

static int dsa_slave_stats_cls_flower(struct net_device *dev,
				      struct flow_cls_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_stats)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}

static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
					 struct flow_cls_offload *cls,
					 bool ingress)
{
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		return dsa_slave_add_cls_flower(dev, cls, ingress);
	case FLOW_CLS_DESTROY:
		return dsa_slave_del_cls_flower(dev, cls, ingress);
	case FLOW_CLS_STATS:
		return dsa_slave_stats_cls_flower(dev, cls, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	case TC_SETUP_CLSFLOWER:
		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static LIST_HEAD(dsa_slave_block_cb_list);

static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_slave_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
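
/* Usage note (illustrative, commands not taken from this file): the split
 * into the _ig/_eg wrappers above lets one callback learn which clsact
 * hook a block was bound to, e.g. for a setup such as:
 *
 *	tc qdisc add dev swp0 clsact
 *	tc filter add dev swp0 ingress matchall \
 *		action mirred egress mirror dev swp1
 */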

static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
				    void *type_data)
{
	struct net_device *master = dsa_port_to_master(dsa_to_port(ds, port));

	if (!master->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
}

static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	switch (type) {
	case TC_SETUP_BLOCK:
		return dsa_slave_setup_tc_block(dev, type_data);
	case TC_SETUP_FT:
		return dsa_slave_setup_ft_block(ds, dp->index, type_data);
	default:
		break;
	}

	if (!ds->ops->port_setup_tc)
		return -EOPNOTSUPP;

	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}

static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_slave_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *ts)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}

static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct netlink_ext_ack extack = {0};
	int ret;

	/* User port... */
	ret = dsa_port_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "%s\n", extack._msg);
		return ret;
	}

	/* And CPU port... */
	ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
				   extack._msg);
		return ret;
	}

	return 0;
}
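
/* The VID must be installed on both ends of the hardware path: on the
 * user port so the switch accepts the tagged frames, and on the CPU port
 * so they can still reach the host. This runs, for example, when an
 * 8021q upper such as swp0.100 is created (illustrative name).
 */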

static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				      u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	int err;

	err = dsa_port_vlan_del(dp, &vlan);
	if (err)
		return err;

	return dsa_port_host_vlan_del(dp, &vlan);
}

static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_slave_vlan_rx_add_vid(arg, proto, vid);
}

static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_slave_vlan_rx_kill_vid(arg, proto, vid);
}

/* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
 * filtering is enabled. The baseline is that only ports that offload a
 * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware,
 * but there are exceptions for quirky hardware.
 *
 * If ds->vlan_filtering_is_global = true, then standalone ports which share
 * the same switch with other ports that offload a VLAN-aware bridge are also
 * inevitably VLAN-aware.
 *
 * To summarize, a DSA switch port offloads:
 *
 * - If standalone (this includes software bridge, software LAG):
 *     - if ds->needs_standalone_vlan_filtering = true, OR if
 *       (ds->vlan_filtering_is_global = true AND there are bridges spanning
 *       this switch chip which have vlan_filtering=1)
 *         - the 8021q upper VLANs
 *     - else (standalone VLAN filtering is not needed, VLAN filtering is not
 *       global, or it is, but no port is under a VLAN-aware bridge):
 *         - no VLAN (any 8021q upper is a software VLAN)
 *
 * - If under a vlan_filtering=0 bridge which it offloads:
 *     - if ds->configure_vlan_while_not_filtering = true (default):
 *         - the bridge VLANs. These VLANs are committed to hardware but inactive.
 *     - else (deprecated):
 *         - no VLAN. The bridge VLANs are not restored when VLAN awareness is
 *           enabled, so this behavior is broken and discouraged.
 *
 * - If under a vlan_filtering=1 bridge which it offloads:
 *     - the bridge VLANs
 *     - the 8021q upper VLANs
 */
int dsa_slave_manage_vlan_filtering(struct net_device *slave,
				    bool vlan_filtering)
{
	int err;

	if (vlan_filtering) {
		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

		err = vlan_for_each(slave, dsa_slave_restore_vlan, slave);
		if (err) {
			vlan_for_each(slave, dsa_slave_clear_vlan, slave);
			slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
			return err;
		}
	} else {
		err = vlan_for_each(slave, dsa_slave_clear_vlan, slave);
		if (err)
			return err;

		slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	return 0;
}
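
/* Note: NETIF_F_HW_VLAN_CTAG_FILTER is toggled together with the replay,
 * so that 8021q uppers created after this point will (or will no longer)
 * be reported through .ndo_vlan_rx_add_vid()/.ndo_vlan_rx_kill_vid() by
 * the 8021q core.
 */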
1814 
1815 struct dsa_hw_port {
1816 	struct list_head list;
1817 	struct net_device *dev;
1818 	int old_mtu;
1819 };
1820 
1821 static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
1822 {
1823 	const struct dsa_hw_port *p;
1824 	int err;
1825 
1826 	list_for_each_entry(p, hw_port_list, list) {
1827 		if (p->dev->mtu == mtu)
1828 			continue;
1829 
1830 		err = dev_set_mtu(p->dev, mtu);
1831 		if (err)
1832 			goto rollback;
1833 	}
1834 
1835 	return 0;
1836 
1837 rollback:
1838 	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
1839 		if (p->dev->mtu == p->old_mtu)
1840 			continue;
1841 
1842 		if (dev_set_mtu(p->dev, p->old_mtu))
1843 			netdev_err(p->dev, "Failed to restore MTU\n");
1844 	}
1845 
1846 	return err;
1847 }
1848 
1849 static void dsa_hw_port_list_free(struct list_head *hw_port_list)
1850 {
1851 	struct dsa_hw_port *p, *n;
1852 
1853 	list_for_each_entry_safe(p, n, hw_port_list, list)
1854 		kfree(p);
1855 }
1856 
1857 /* Make the hardware datapath to/from @dev limited to a common MTU */
1858 static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
1859 {
1860 	struct list_head hw_port_list;
1861 	struct dsa_switch_tree *dst;
1862 	int min_mtu = ETH_MAX_MTU;
1863 	struct dsa_port *other_dp;
1864 	int err;
1865 
1866 	if (!dp->ds->mtu_enforcement_ingress)
1867 		return;
1868 
1869 	if (!dp->bridge)
1870 		return;
1871 
1872 	INIT_LIST_HEAD(&hw_port_list);
1873 
1874 	/* Populate the list of ports that are part of the same bridge
1875 	 * as the newly added/modified port
1876 	 */
1877 	list_for_each_entry(dst, &dsa_tree_list, list) {
1878 		list_for_each_entry(other_dp, &dst->ports, list) {
1879 			struct dsa_hw_port *hw_port;
1880 			struct net_device *slave;
1881 
1882 			if (other_dp->type != DSA_PORT_TYPE_USER)
1883 				continue;
1884 
1885 			if (!dsa_port_bridge_same(dp, other_dp))
1886 				continue;
1887 
1888 			if (!other_dp->ds->mtu_enforcement_ingress)
1889 				continue;
1890 
1891 			slave = other_dp->slave;
1892 
1893 			if (min_mtu > slave->mtu)
1894 				min_mtu = slave->mtu;
1895 
1896 			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
1897 			if (!hw_port)
1898 				goto out;
1899 
1900 			hw_port->dev = slave;
1901 			hw_port->old_mtu = slave->mtu;
1902 
1903 			list_add(&hw_port->list, &hw_port_list);
1904 		}
1905 	}
1906 
1907 	/* Attempt to configure the entire hardware bridge to the newly added
1908 	 * interface's MTU first, regardless of whether the intention of the
1909 	 * user was to raise or lower it.
1910 	 */
1911 	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
1912 	if (!err)
1913 		goto out;
1914 
1915 	/* Clearly that didn't work out so well, so just set the minimum MTU on
1916 	 * all hardware bridge ports now. If this fails too, then all ports will
1917 	 * still have their old MTU rolled back anyway.
1918 	 */
1919 	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);
1920 
1921 out:
1922 	dsa_hw_port_list_free(&hw_port_list);
1923 }
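/* Worked example (assumed numbers, added commentary): with user ports A, B
 * and C in one bridge, all at MTU 1500 and mtu_enforcement_ingress set,
 * raising A to 9000 first tries to set B and C to 9000 as well. If either
 * port's hardware rejects that, dsa_hw_port_list_set_mtu() rolls the already
 * changed ports back, and the fallback pass pins every port to min_mtu,
 * which here is 1500 (the unchanged MTU of B and C).
 */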
1924 
1925 int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
1926 {
1927 	struct net_device *master = dsa_slave_to_master(dev);
1928 	struct dsa_port *dp = dsa_slave_to_port(dev);
1929 	struct dsa_port *cpu_dp = dp->cpu_dp;
1930 	struct dsa_switch *ds = dp->ds;
1931 	struct dsa_port *other_dp;
1932 	int largest_mtu = 0;
1933 	int new_master_mtu;
1934 	int old_master_mtu;
1935 	int mtu_limit;
1936 	int cpu_mtu;
1937 	int err;
1938 
1939 	if (!ds->ops->port_change_mtu)
1940 		return -EOPNOTSUPP;
1941 
1942 	dsa_tree_for_each_user_port(other_dp, ds->dst) {
1943 		int slave_mtu;
1944 
1945 		/* During probe, this function is called once per slave device,
1946 		 * while some of them have not been allocated yet. That's fine:
1947 		 * an absent slave cannot change the maximum, so skip it.
1948 		 */
1949 		if (!other_dp->slave)
1950 			continue;
1951 
1952 		/* Pretend that we have already applied the new MTU, which we
1953 		 * actually haven't yet (the integrity checks are still pending)
1954 		 */
1955 		if (dp == other_dp)
1956 			slave_mtu = new_mtu;
1957 		else
1958 			slave_mtu = other_dp->slave->mtu;
1959 
1960 		if (largest_mtu < slave_mtu)
1961 			largest_mtu = slave_mtu;
1962 	}
1963 
1964 	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
1965 	old_master_mtu = master->mtu;
1966 	new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
1967 	if (new_master_mtu > mtu_limit)
1968 		return -ERANGE;
1969 
1970 	/* If the master MTU isn't over limit, there's no need to check the CPU
1971 	 * MTU, since that surely isn't either.
1972 	 */
1973 	cpu_mtu = largest_mtu;
1974 
1975 	/* Start applying the changes */
1976 	if (new_master_mtu != old_master_mtu) {
1977 		err = dev_set_mtu(master, new_master_mtu);
1978 		if (err < 0)
1979 			goto out_master_failed;
1980 
1981 		/* We only need to propagate the MTU of the CPU port to
1982 		 * upstream switches, so emit a notifier which updates them.
1983 		 */
1984 		err = dsa_port_mtu_change(cpu_dp, cpu_mtu);
1985 		if (err)
1986 			goto out_cpu_failed;
1987 	}
1988 
1989 	err = ds->ops->port_change_mtu(ds, dp->index, new_mtu);
1990 	if (err)
1991 		goto out_port_failed;
1992 
1993 	dev->mtu = new_mtu;
1994 
1995 	dsa_bridge_mtu_normalization(dp);
1996 
1997 	return 0;
1998 
1999 out_port_failed:
2000 	if (new_master_mtu != old_master_mtu)
2001 		dsa_port_mtu_change(cpu_dp, old_master_mtu -
2002 				    dsa_tag_protocol_overhead(cpu_dp->tag_ops));
2003 out_cpu_failed:
2004 	if (new_master_mtu != old_master_mtu)
2005 		dev_set_mtu(master, old_master_mtu);
2006 out_master_failed:
2007 	return err;
2008 }
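/* Illustrative arithmetic (assumed values, added commentary): with a tagging
 * protocol that adds 8 bytes of overhead and a largest user port MTU of
 * 9000, the master must carry 9000 + 8 = 9008 bytes, so the request is
 * rejected with -ERANGE whenever min(master->max_mtu, dev->max_mtu) < 9008:
 *
 *	largest_mtu    = 9000;
 *	new_master_mtu = largest_mtu + 8;	// dsa_tag_protocol_overhead()
 *	mtu_limit      = min(master->max_mtu, dev->max_mtu);
 *	if (new_master_mtu > mtu_limit)		// 9008 > mtu_limit
 *		return -ERANGE;
 */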
2009 
2010 static int __maybe_unused
2011 dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
2012 {
2013 	struct dsa_port *dp = dsa_slave_to_port(dev);
2014 	struct dsa_switch *ds = dp->ds;
2015 	unsigned long mask, new_prio;
2016 	int err, port = dp->index;
2017 
2018 	if (!ds->ops->port_set_default_prio)
2019 		return -EOPNOTSUPP;
2020 
2021 	err = dcb_ieee_setapp(dev, app);
2022 	if (err)
2023 		return err;
2024 
2025 	mask = dcb_ieee_getapp_mask(dev, app);
2026 	new_prio = __fls(mask);
2027 
2028 	err = ds->ops->port_set_default_prio(ds, port, new_prio);
2029 	if (err) {
2030 		dcb_ieee_delapp(dev, app);
2031 		return err;
2032 	}
2033 
2034 	return 0;
2035 }
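/* Sketch (illustrative standalone C, added commentary):
 * dcb_ieee_getapp_mask() returns a bitmask with one bit set per priority
 * that has a matching APP entry, and __fls() resolves conflicts in favor of
 * the highest priority. APP entries with priorities 2 and 5 yield the mask
 * 0b100100, so the port default priority becomes 5:
 *
 *	unsigned long mask = (1UL << 2) | (1UL << 5);
 *	unsigned long prio = 0;
 *
 *	while (mask >>= 1)	// naive __fls(): index of highest set bit
 *		prio++;
 *	// prio == 5
 */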
2036 
2037 static int __maybe_unused
2038 dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
2039 {
2040 	struct dsa_port *dp = dsa_slave_to_port(dev);
2041 	struct dsa_switch *ds = dp->ds;
2042 	unsigned long mask, new_prio;
2043 	int err, port = dp->index;
2044 	u8 dscp = app->protocol;
2045 
2046 	if (!ds->ops->port_add_dscp_prio)
2047 		return -EOPNOTSUPP;
2048 
2049 	if (dscp >= 64) {
2050 		netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
2051 			   dscp);
2052 		return -EINVAL;
2053 	}
2054 
2055 	err = dcb_ieee_setapp(dev, app);
2056 	if (err)
2057 		return err;
2058 
2059 	mask = dcb_ieee_getapp_mask(dev, app);
2060 	new_prio = __fls(mask);
2061 
2062 	err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio);
2063 	if (err) {
2064 		dcb_ieee_delapp(dev, app);
2065 		return err;
2066 	}
2067 
2068 	return 0;
2069 }
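/* Illustrative only (assumed values, added commentary): a request mapping
 * DSCP 46 (EF) to priority 7 reaches this handler as roughly:
 *
 *	struct dcb_app app = {
 *		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
 *		.protocol = 46,		// the DSCP value, must be < 64
 *		.priority = 7,
 *	};
 *
 * which is why protocol values of 64 and above are rejected with -EINVAL.
 */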
2070 
2071 static int __maybe_unused dsa_slave_dcbnl_ieee_setapp(struct net_device *dev,
2072 						      struct dcb_app *app)
2073 {
2074 	switch (app->selector) {
2075 	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
2076 		switch (app->protocol) {
2077 		case 0:
2078 			return dsa_slave_dcbnl_set_default_prio(dev, app);
2079 		default:
2080 			return -EOPNOTSUPP;
2081 		}
2082 		break;
2083 	case IEEE_8021QAZ_APP_SEL_DSCP:
2084 		return dsa_slave_dcbnl_add_dscp_prio(dev, app);
2085 	default:
2086 		return -EOPNOTSUPP;
2087 	}
2088 }
2089 
2090 static int __maybe_unused
2091 dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
2092 {
2093 	struct dsa_port *dp = dsa_slave_to_port(dev);
2094 	struct dsa_switch *ds = dp->ds;
2095 	unsigned long mask, new_prio;
2096 	int err, port = dp->index;
2097 
2098 	if (!ds->ops->port_set_default_prio)
2099 		return -EOPNOTSUPP;
2100 
2101 	err = dcb_ieee_delapp(dev, app);
2102 	if (err)
2103 		return err;
2104 
2105 	mask = dcb_ieee_getapp_mask(dev, app);
2106 	new_prio = mask ? __fls(mask) : 0;
2107 
2108 	err = ds->ops->port_set_default_prio(ds, port, new_prio);
2109 	if (err) {
2110 		dcb_ieee_setapp(dev, app);
2111 		return err;
2112 	}
2113 
2114 	return 0;
2115 }
2116 
2117 static int __maybe_unused
2118 dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
2119 {
2120 	struct dsa_port *dp = dsa_slave_to_port(dev);
2121 	struct dsa_switch *ds = dp->ds;
2122 	int err, port = dp->index;
2123 	u8 dscp = app->protocol;
2124 
2125 	if (!ds->ops->port_del_dscp_prio)
2126 		return -EOPNOTSUPP;
2127 
2128 	err = dcb_ieee_delapp(dev, app);
2129 	if (err)
2130 		return err;
2131 
2132 	err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority);
2133 	if (err) {
2134 		dcb_ieee_setapp(dev, app);
2135 		return err;
2136 	}
2137 
2138 	return 0;
2139 }
2140 
2141 static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev,
2142 						      struct dcb_app *app)
2143 {
2144 	switch (app->selector) {
2145 	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
2146 		switch (app->protocol) {
2147 		case 0:
2148 			return dsa_slave_dcbnl_del_default_prio(dev, app);
2149 		default:
2150 			return -EOPNOTSUPP;
2151 		}
2152 		break;
2153 	case IEEE_8021QAZ_APP_SEL_DSCP:
2154 		return dsa_slave_dcbnl_del_dscp_prio(dev, app);
2155 	default:
2156 		return -EOPNOTSUPP;
2157 	}
2158 }
2159 
2160 /* Pre-populate the DCB application priority table with the priorities
2161  * configured during switch setup, which we read from hardware here.
2162  */
2163 static int dsa_slave_dcbnl_init(struct net_device *dev)
2164 {
2165 	struct dsa_port *dp = dsa_slave_to_port(dev);
2166 	struct dsa_switch *ds = dp->ds;
2167 	int port = dp->index;
2168 	int err;
2169 
2170 	if (ds->ops->port_get_default_prio) {
2171 		int prio = ds->ops->port_get_default_prio(ds, port);
2172 		struct dcb_app app = {
2173 			.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
2174 			.protocol = 0,
2175 			.priority = prio,
2176 		};
2177 
2178 		if (prio < 0)
2179 			return prio;
2180 
2181 		err = dcb_ieee_setapp(dev, &app);
2182 		if (err)
2183 			return err;
2184 	}
2185 
2186 	if (ds->ops->port_get_dscp_prio) {
2187 		int protocol;
2188 
2189 		for (protocol = 0; protocol < 64; protocol++) {
2190 			struct dcb_app app = {
2191 				.selector = IEEE_8021QAZ_APP_SEL_DSCP,
2192 				.protocol = protocol,
2193 			};
2194 			int prio;
2195 
2196 			prio = ds->ops->port_get_dscp_prio(ds, port, protocol);
2197 			if (prio == -EOPNOTSUPP)
2198 				continue;
2199 			if (prio < 0)
2200 				return prio;
2201 
2202 			app.priority = prio;
2203 
2204 			err = dcb_ieee_setapp(dev, &app);
2205 			if (err)
2206 				return err;
2207 		}
2208 	}
2209 
2210 	return 0;
2211 }
2212 
2213 static const struct ethtool_ops dsa_slave_ethtool_ops = {
2214 	.get_drvinfo		= dsa_slave_get_drvinfo,
2215 	.get_regs_len		= dsa_slave_get_regs_len,
2216 	.get_regs		= dsa_slave_get_regs,
2217 	.nway_reset		= dsa_slave_nway_reset,
2218 	.get_link		= ethtool_op_get_link,
2219 	.get_eeprom_len		= dsa_slave_get_eeprom_len,
2220 	.get_eeprom		= dsa_slave_get_eeprom,
2221 	.set_eeprom		= dsa_slave_set_eeprom,
2222 	.get_strings		= dsa_slave_get_strings,
2223 	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
2224 	.get_sset_count		= dsa_slave_get_sset_count,
2225 	.get_eth_phy_stats	= dsa_slave_get_eth_phy_stats,
2226 	.get_eth_mac_stats	= dsa_slave_get_eth_mac_stats,
2227 	.get_eth_ctrl_stats	= dsa_slave_get_eth_ctrl_stats,
2228 	.get_rmon_stats		= dsa_slave_get_rmon_stats,
2229 	.set_wol		= dsa_slave_set_wol,
2230 	.get_wol		= dsa_slave_get_wol,
2231 	.set_eee		= dsa_slave_set_eee,
2232 	.get_eee		= dsa_slave_get_eee,
2233 	.get_link_ksettings	= dsa_slave_get_link_ksettings,
2234 	.set_link_ksettings	= dsa_slave_set_link_ksettings,
2235 	.get_pause_stats	= dsa_slave_get_pause_stats,
2236 	.get_pauseparam		= dsa_slave_get_pauseparam,
2237 	.set_pauseparam		= dsa_slave_set_pauseparam,
2238 	.get_rxnfc		= dsa_slave_get_rxnfc,
2239 	.set_rxnfc		= dsa_slave_set_rxnfc,
2240 	.get_ts_info		= dsa_slave_get_ts_info,
2241 	.self_test		= dsa_slave_net_selftest,
2242 	.get_mm			= dsa_slave_get_mm,
2243 	.set_mm			= dsa_slave_set_mm,
2244 	.get_mm_stats		= dsa_slave_get_mm_stats,
2245 };
2246 
2247 static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = {
2248 	.ieee_setapp		= dsa_slave_dcbnl_ieee_setapp,
2249 	.ieee_delapp		= dsa_slave_dcbnl_ieee_delapp,
2250 };
2251 
2252 static void dsa_slave_get_stats64(struct net_device *dev,
2253 				  struct rtnl_link_stats64 *s)
2254 {
2255 	struct dsa_port *dp = dsa_slave_to_port(dev);
2256 	struct dsa_switch *ds = dp->ds;
2257 
2258 	if (ds->ops->get_stats64)
2259 		ds->ops->get_stats64(ds, dp->index, s);
2260 	else
2261 		dev_get_tstats64(dev, s);
2262 }
2263 
2264 static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
2265 				       struct net_device_path *path)
2266 {
2267 	struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
2268 	struct net_device *master = dsa_port_to_master(dp);
2269 	struct dsa_port *cpu_dp = dp->cpu_dp;
2270 
2271 	path->dev = ctx->dev;
2272 	path->type = DEV_PATH_DSA;
2273 	path->dsa.proto = cpu_dp->tag_ops->proto;
2274 	path->dsa.port = dp->index;
2275 	ctx->dev = master;
2276 
2277 	return 0;
2278 }
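/* Added commentary: this callback lets the netfilter flowtable resolve the
 * device stack. For a user port with index 2 behind a master named "eth0"
 * (assumed values), the hop recorded above reads "DSA port 2, using the CPU
 * port's tag protocol, continue resolution at eth0", so an offloaded flow
 * knows both the egress master and the tag to prepend.
 */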
2279 
2280 static const struct net_device_ops dsa_slave_netdev_ops = {
2281 	.ndo_open	 	= dsa_slave_open,
2282 	.ndo_stop		= dsa_slave_close,
2283 	.ndo_start_xmit		= dsa_slave_xmit,
2284 	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
2285 	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
2286 	.ndo_set_mac_address	= dsa_slave_set_mac_address,
2287 	.ndo_fdb_dump		= dsa_slave_fdb_dump,
2288 	.ndo_eth_ioctl		= dsa_slave_ioctl,
2289 	.ndo_get_iflink		= dsa_slave_get_iflink,
2290 #ifdef CONFIG_NET_POLL_CONTROLLER
2291 	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
2292 	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
2293 	.ndo_poll_controller	= dsa_slave_poll_controller,
2294 #endif
2295 	.ndo_setup_tc		= dsa_slave_setup_tc,
2296 	.ndo_get_stats64	= dsa_slave_get_stats64,
2297 	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
2298 	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
2299 	.ndo_change_mtu		= dsa_slave_change_mtu,
2300 	.ndo_fill_forward_path	= dsa_slave_fill_forward_path,
2301 };
2302 
2303 static struct device_type dsa_type = {
2304 	.name	= "dsa",
2305 };
2306 
2307 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
2308 {
2309 	const struct dsa_port *dp = dsa_to_port(ds, port);
2310 
2311 	if (dp->pl)
2312 		phylink_mac_change(dp->pl, up);
2313 }
2314 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
2315 
2316 static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
2317 					  struct phylink_link_state *state)
2318 {
2319 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
2320 	struct dsa_switch *ds = dp->ds;
2321 
2322 	/* No need to check that this operation is valid: the callback is
2323 	 * only registered when the switch provides ->phylink_fixed_state.
2324 	 */
2325 	ds->ops->phylink_fixed_state(ds, dp->index, state);
2326 }
2327 
2328 /* slave device setup *******************************************************/
2329 static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr,
2330 				 u32 flags)
2331 {
2332 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2333 	struct dsa_switch *ds = dp->ds;
2334 
2335 	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
2336 	if (!slave_dev->phydev) {
2337 		netdev_err(slave_dev, "no phy at %d\n", addr);
2338 		return -ENODEV;
2339 	}
2340 
2341 	slave_dev->phydev->dev_flags |= flags;
2342 
2343 	return phylink_connect_phy(dp->pl, slave_dev->phydev);
2344 }
2345 
2346 static int dsa_slave_phy_setup(struct net_device *slave_dev)
2347 {
2348 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2349 	struct device_node *port_dn = dp->dn;
2350 	struct dsa_switch *ds = dp->ds;
2351 	u32 phy_flags = 0;
2352 	int ret;
2353 
2354 	dp->pl_config.dev = &slave_dev->dev;
2355 	dp->pl_config.type = PHYLINK_NETDEV;
2356 
2357 	/* The get_fixed_state callback takes precedence over polling the
2358 	 * link GPIO in PHYLINK (see phylink_get_fixed_state).  Only set
2359 	 * this if the switch provides such a callback.
2360 	 */
2361 	if (ds->ops->phylink_fixed_state) {
2362 		dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
2363 		dp->pl_config.poll_fixed_state = true;
2364 	}
2365 
2366 	ret = dsa_port_phylink_create(dp);
2367 	if (ret)
2368 		return ret;
2369 
2370 	if (ds->ops->get_phy_flags)
2371 		phy_flags = ds->ops->get_phy_flags(ds, dp->index);
2372 
2373 	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
2374 	if (ret == -ENODEV && ds->slave_mii_bus) {
2375 		/* We could not connect to a designated PHY or SFP, so try to
2376 		 * use the switch's internal MDIO bus instead
2377 		 */
2378 		ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
2379 	}
2380 	if (ret) {
2381 		netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
2382 			   ERR_PTR(ret));
2383 		dsa_port_phylink_destroy(dp);
2384 	}
2385 
2386 	return ret;
2387 }
2388 
2389 void dsa_slave_setup_tagger(struct net_device *slave)
2390 {
2391 	struct dsa_port *dp = dsa_slave_to_port(slave);
2392 	struct net_device *master = dsa_port_to_master(dp);
2393 	struct dsa_slave_priv *p = netdev_priv(slave);
2394 	const struct dsa_port *cpu_dp = dp->cpu_dp;
2395 	const struct dsa_switch *ds = dp->ds;
2396 
2397 	slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
2398 	slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
2399 	/* Try to save one extra realloc later in the TX path (in the master)
2400 	 * by also inheriting the master's needed headroom and tailroom.
2401 	 * The 8021q driver also does this.
2402 	 */
2403 	slave->needed_headroom += master->needed_headroom;
2404 	slave->needed_tailroom += master->needed_tailroom;
2405 
2406 	p->xmit = cpu_dp->tag_ops->xmit;
2407 
2408 	slave->features = master->vlan_features | NETIF_F_HW_TC;
2409 	slave->hw_features |= NETIF_F_HW_TC;
2410 	slave->features |= NETIF_F_LLTX;
2411 	if (slave->needed_tailroom)
2412 		slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
2413 	if (ds->needs_standalone_vlan_filtering)
2414 		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2415 }
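/* Worked example (assumed numbers, added commentary): a tagger with
 * needed_headroom = 8 on a master that itself advertises needed_headroom = 2
 * results in:
 *
 *	slave->needed_headroom = 8 + 2;	// tag + master headroom = 10
 *
 * so skbs allocated against the slave already fit both the switch tag and
 * the master's own encapsulation without a reallocation on transmit.
 */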
2416 
2417 int dsa_slave_suspend(struct net_device *slave_dev)
2418 {
2419 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2420 
2421 	if (!netif_running(slave_dev))
2422 		return 0;
2423 
2424 	netif_device_detach(slave_dev);
2425 
2426 	rtnl_lock();
2427 	phylink_stop(dp->pl);
2428 	rtnl_unlock();
2429 
2430 	return 0;
2431 }
2432 
2433 int dsa_slave_resume(struct net_device *slave_dev)
2434 {
2435 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2436 
2437 	if (!netif_running(slave_dev))
2438 		return 0;
2439 
2440 	netif_device_attach(slave_dev);
2441 
2442 	rtnl_lock();
2443 	phylink_start(dp->pl);
2444 	rtnl_unlock();
2445 
2446 	return 0;
2447 }
2448 
2449 int dsa_slave_create(struct dsa_port *port)
2450 {
2451 	struct net_device *master = dsa_port_to_master(port);
2452 	struct dsa_switch *ds = port->ds;
2453 	struct net_device *slave_dev;
2454 	struct dsa_slave_priv *p;
2455 	const char *name;
2456 	int assign_type;
2457 	int ret;
2458 
2459 	if (!ds->num_tx_queues)
2460 		ds->num_tx_queues = 1;
2461 
2462 	if (port->name) {
2463 		name = port->name;
2464 		assign_type = NET_NAME_PREDICTABLE;
2465 	} else {
2466 		name = "eth%d";
2467 		assign_type = NET_NAME_ENUM;
2468 	}
2469 
2470 	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
2471 				     assign_type, ether_setup,
2472 				     ds->num_tx_queues, 1);
2473 	if (slave_dev == NULL)
2474 		return -ENOMEM;
2475 
2476 	slave_dev->rtnl_link_ops = &dsa_link_ops;
2477 	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
2478 #if IS_ENABLED(CONFIG_DCB)
2479 	slave_dev->dcbnl_ops = &dsa_slave_dcbnl_ops;
2480 #endif
2481 	if (!is_zero_ether_addr(port->mac))
2482 		eth_hw_addr_set(slave_dev, port->mac);
2483 	else
2484 		eth_hw_addr_inherit(slave_dev, master);
2485 	slave_dev->priv_flags |= IFF_NO_QUEUE;
2486 	if (dsa_switch_supports_uc_filtering(ds))
2487 		slave_dev->priv_flags |= IFF_UNICAST_FLT;
2488 	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
2489 	if (ds->ops->port_max_mtu)
2490 		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
2491 	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
2492 
2493 	SET_NETDEV_DEV(slave_dev, port->ds->dev);
2494 	SET_NETDEV_DEVLINK_PORT(slave_dev, &port->devlink_port);
2495 	slave_dev->dev.of_node = port->dn;
2496 	slave_dev->vlan_features = master->vlan_features;
2497 
2498 	p = netdev_priv(slave_dev);
2499 	slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2500 	if (!slave_dev->tstats) {
2501 		free_netdev(slave_dev);
2502 		return -ENOMEM;
2503 	}
2504 
2505 	ret = gro_cells_init(&p->gcells, slave_dev);
2506 	if (ret)
2507 		goto out_free;
2508 
2509 	p->dp = port;
2510 	INIT_LIST_HEAD(&p->mall_tc_list);
2511 	port->slave = slave_dev;
2512 	dsa_slave_setup_tagger(slave_dev);
2513 
2514 	netif_carrier_off(slave_dev);
2515 
2516 	ret = dsa_slave_phy_setup(slave_dev);
2517 	if (ret) {
2518 		netdev_err(slave_dev,
2519 			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
2520 			   ret, ds->dst->index, ds->index, port->index);
2521 		goto out_gcells;
2522 	}
2523 
2524 	rtnl_lock();
2525 
2526 	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
2527 	if (ret && ret != -EOPNOTSUPP)
2528 		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
2529 			 ret, ETH_DATA_LEN, port->index);
2530 
2531 	ret = register_netdevice(slave_dev);
2532 	if (ret) {
2533 		netdev_err(master, "error %d registering interface %s\n",
2534 			   ret, slave_dev->name);
2535 		rtnl_unlock();
2536 		goto out_phy;
2537 	}
2538 
2539 	if (IS_ENABLED(CONFIG_DCB)) {
2540 		ret = dsa_slave_dcbnl_init(slave_dev);
2541 		if (ret) {
2542 			netdev_err(slave_dev,
2543 				   "failed to initialize DCB: %pe\n",
2544 				   ERR_PTR(ret));
2545 			rtnl_unlock();
2546 			goto out_unregister;
2547 		}
2548 	}
2549 
2550 	ret = netdev_upper_dev_link(master, slave_dev, NULL);
2551 
2552 	rtnl_unlock();
2553 
2554 	if (ret)
2555 		goto out_unregister;
2556 
2557 	return 0;
2558 
2559 out_unregister:
2560 	unregister_netdev(slave_dev);
2561 out_phy:
2562 	rtnl_lock();
2563 	phylink_disconnect_phy(p->dp->pl);
2564 	rtnl_unlock();
2565 	dsa_port_phylink_destroy(p->dp);
2566 out_gcells:
2567 	gro_cells_destroy(&p->gcells);
2568 out_free:
2569 	free_percpu(slave_dev->tstats);
2570 	free_netdev(slave_dev);
2571 	port->slave = NULL;
2572 	return ret;
2573 }
2574 
2575 void dsa_slave_destroy(struct net_device *slave_dev)
2576 {
2577 	struct net_device *master = dsa_slave_to_master(slave_dev);
2578 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2579 	struct dsa_slave_priv *p = netdev_priv(slave_dev);
2580 
2581 	netif_carrier_off(slave_dev);
2582 	rtnl_lock();
2583 	netdev_upper_dev_unlink(master, slave_dev);
2584 	unregister_netdevice(slave_dev);
2585 	phylink_disconnect_phy(dp->pl);
2586 	rtnl_unlock();
2587 
2588 	dsa_port_phylink_destroy(dp);
2589 	gro_cells_destroy(&p->gcells);
2590 	free_percpu(slave_dev->tstats);
2591 	free_netdev(slave_dev);
2592 }
2593 
2594 int dsa_slave_change_master(struct net_device *dev, struct net_device *master,
2595 			    struct netlink_ext_ack *extack)
2596 {
2597 	struct net_device *old_master = dsa_slave_to_master(dev);
2598 	struct dsa_port *dp = dsa_slave_to_port(dev);
2599 	struct dsa_switch *ds = dp->ds;
2600 	struct net_device *upper;
2601 	struct list_head *iter;
2602 	int err;
2603 
2604 	if (master == old_master)
2605 		return 0;
2606 
2607 	if (!ds->ops->port_change_master) {
2608 		NL_SET_ERR_MSG_MOD(extack,
2609 				   "Driver does not support changing DSA master");
2610 		return -EOPNOTSUPP;
2611 	}
2612 
2613 	if (!netdev_uses_dsa(master)) {
2614 		NL_SET_ERR_MSG_MOD(extack,
2615 				   "Interface not eligible as DSA master");
2616 		return -EOPNOTSUPP;
2617 	}
2618 
2619 	netdev_for_each_upper_dev_rcu(master, upper, iter) {
2620 		if (dsa_slave_dev_check(upper))
2621 			continue;
2622 		if (netif_is_bridge_master(upper))
2623 			continue;
2624 		NL_SET_ERR_MSG_MOD(extack, "Cannot join master with unknown uppers");
2625 		return -EOPNOTSUPP;
2626 	}
2627 
2628 	/* Since we allow live-changing the DSA master, and we auto-open the
2629 	 * DSA master when the user port opens, we need to ensure that the
2630 	 * new DSA master is open too.
2631 	 */
2632 	if (dev->flags & IFF_UP) {
2633 		err = dev_open(master, extack);
2634 		if (err)
2635 			return err;
2636 	}
2637 
2638 	netdev_upper_dev_unlink(old_master, dev);
2639 
2640 	err = netdev_upper_dev_link(master, dev, extack);
2641 	if (err)
2642 		goto out_revert_old_master_unlink;
2643 
2644 	err = dsa_port_change_master(dp, master, extack);
2645 	if (err)
2646 		goto out_revert_master_link;
2647 
2648 	/* Update the MTU of the new CPU port through cross-chip notifiers */
2649 	err = dsa_slave_change_mtu(dev, dev->mtu);
2650 	if (err && err != -EOPNOTSUPP) {
2651 		netdev_warn(dev,
2652 			    "nonfatal error updating MTU with new master: %pe\n",
2653 			    ERR_PTR(err));
2654 	}
2655 
2656 	/* If the port doesn't have its own MAC address and relies on the DSA
2657 	 * master's one, inherit it again from the new DSA master.
2658 	 */
2659 	if (is_zero_ether_addr(dp->mac))
2660 		eth_hw_addr_inherit(dev, master);
2661 
2662 	return 0;
2663 
2664 out_revert_master_link:
2665 	netdev_upper_dev_unlink(master, dev);
2666 out_revert_old_master_unlink:
2667 	netdev_upper_dev_link(old_master, dev, NULL);
2668 	return err;
2669 }
2670 
2671 bool dsa_slave_dev_check(const struct net_device *dev)
2672 {
2673 	return dev->netdev_ops == &dsa_slave_netdev_ops;
2674 }
2675 EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
2676 
2677 static int dsa_slave_changeupper(struct net_device *dev,
2678 				 struct netdev_notifier_changeupper_info *info)
2679 {
2680 	struct dsa_port *dp = dsa_slave_to_port(dev);
2681 	struct netlink_ext_ack *extack;
2682 	int err = NOTIFY_DONE;
2683 
2684 	if (!dsa_slave_dev_check(dev))
2685 		return err;
2686 
2687 	extack = netdev_notifier_info_to_extack(&info->info);
2688 
2689 	if (netif_is_bridge_master(info->upper_dev)) {
2690 		if (info->linking) {
2691 			err = dsa_port_bridge_join(dp, info->upper_dev, extack);
2692 			if (!err)
2693 				dsa_bridge_mtu_normalization(dp);
2694 			if (err == -EOPNOTSUPP) {
2695 				NL_SET_ERR_MSG_WEAK_MOD(extack,
2696 							"Offloading not supported");
2697 				err = 0;
2698 			}
2699 			err = notifier_from_errno(err);
2700 		} else {
2701 			dsa_port_bridge_leave(dp, info->upper_dev);
2702 			err = NOTIFY_OK;
2703 		}
2704 	} else if (netif_is_lag_master(info->upper_dev)) {
2705 		if (info->linking) {
2706 			err = dsa_port_lag_join(dp, info->upper_dev,
2707 						info->upper_info, extack);
2708 			if (err == -EOPNOTSUPP) {
2709 				NL_SET_ERR_MSG_WEAK_MOD(extack,
2710 							"Offloading not supported");
2711 				err = 0;
2712 			}
2713 			err = notifier_from_errno(err);
2714 		} else {
2715 			dsa_port_lag_leave(dp, info->upper_dev);
2716 			err = NOTIFY_OK;
2717 		}
2718 	} else if (is_hsr_master(info->upper_dev)) {
2719 		if (info->linking) {
2720 			err = dsa_port_hsr_join(dp, info->upper_dev);
2721 			if (err == -EOPNOTSUPP) {
2722 				NL_SET_ERR_MSG_WEAK_MOD(extack,
2723 							"Offloading not supported");
2724 				err = 0;
2725 			}
2726 			err = notifier_from_errno(err);
2727 		} else {
2728 			dsa_port_hsr_leave(dp, info->upper_dev);
2729 			err = NOTIFY_OK;
2730 		}
2731 	}
2732 
2733 	return err;
2734 }
2735 
2736 static int dsa_slave_prechangeupper(struct net_device *dev,
2737 				    struct netdev_notifier_changeupper_info *info)
2738 {
2739 	struct dsa_port *dp = dsa_slave_to_port(dev);
2740 
2741 	if (!dsa_slave_dev_check(dev))
2742 		return NOTIFY_DONE;
2743 
2744 	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
2745 		dsa_port_pre_bridge_leave(dp, info->upper_dev);
2746 	else if (netif_is_lag_master(info->upper_dev) && !info->linking)
2747 		dsa_port_pre_lag_leave(dp, info->upper_dev);
2748 	/* dsa_port_pre_hsr_leave is not yet necessary since hsr cannot be
2749 	 * meaningfully enslaved to a bridge yet
2750 	 */
2751 
2752 	return NOTIFY_DONE;
2753 }
2754 
2755 static int
2756 dsa_slave_lag_changeupper(struct net_device *dev,
2757 			  struct netdev_notifier_changeupper_info *info)
2758 {
2759 	struct net_device *lower;
2760 	struct list_head *iter;
2761 	int err = NOTIFY_DONE;
2762 	struct dsa_port *dp;
2763 
2764 	if (!netif_is_lag_master(dev))
2765 		return err;
2766 
2767 	netdev_for_each_lower_dev(dev, lower, iter) {
2768 		if (!dsa_slave_dev_check(lower))
2769 			continue;
2770 
2771 		dp = dsa_slave_to_port(lower);
2772 		if (!dp->lag)
2773 			/* Software LAG */
2774 			continue;
2775 
2776 		err = dsa_slave_changeupper(lower, info);
2777 		if (notifier_to_errno(err))
2778 			break;
2779 	}
2780 
2781 	return err;
2782 }
2783 
2784 /* Same as dsa_slave_lag_changeupper() except that it calls
2785  * dsa_slave_prechangeupper()
2786  */
2787 static int
2788 dsa_slave_lag_prechangeupper(struct net_device *dev,
2789 			     struct netdev_notifier_changeupper_info *info)
2790 {
2791 	struct net_device *lower;
2792 	struct list_head *iter;
2793 	int err = NOTIFY_DONE;
2794 	struct dsa_port *dp;
2795 
2796 	if (!netif_is_lag_master(dev))
2797 		return err;
2798 
2799 	netdev_for_each_lower_dev(dev, lower, iter) {
2800 		if (!dsa_slave_dev_check(lower))
2801 			continue;
2802 
2803 		dp = dsa_slave_to_port(lower);
2804 		if (!dp->lag)
2805 			/* Software LAG */
2806 			continue;
2807 
2808 		err = dsa_slave_prechangeupper(lower, info);
2809 		if (notifier_to_errno(err))
2810 			break;
2811 	}
2812 
2813 	return err;
2814 }
2815 
2816 static int
2817 dsa_prevent_bridging_8021q_upper(struct net_device *dev,
2818 				 struct netdev_notifier_changeupper_info *info)
2819 {
2820 	struct netlink_ext_ack *ext_ack;
2821 	struct net_device *slave, *br;
2822 	struct dsa_port *dp;
2823 
2824 	ext_ack = netdev_notifier_info_to_extack(&info->info);
2825 
2826 	if (!is_vlan_dev(dev))
2827 		return NOTIFY_DONE;
2828 
2829 	slave = vlan_dev_real_dev(dev);
2830 	if (!dsa_slave_dev_check(slave))
2831 		return NOTIFY_DONE;
2832 
2833 	dp = dsa_slave_to_port(slave);
2834 	br = dsa_port_bridge_dev_get(dp);
2835 	if (!br)
2836 		return NOTIFY_DONE;
2837 
2838 	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
2839 	if (br_vlan_enabled(br) &&
2840 	    netif_is_bridge_master(info->upper_dev) && info->linking) {
2841 		NL_SET_ERR_MSG_MOD(ext_ack,
2842 				   "Cannot enslave VLAN device into VLAN aware bridge");
2843 		return notifier_from_errno(-EINVAL);
2844 	}
2845 
2846 	return NOTIFY_DONE;
2847 }
2848 
2849 static int
2850 dsa_slave_check_8021q_upper(struct net_device *dev,
2851 			    struct netdev_notifier_changeupper_info *info)
2852 {
2853 	struct dsa_port *dp = dsa_slave_to_port(dev);
2854 	struct net_device *br = dsa_port_bridge_dev_get(dp);
2855 	struct bridge_vlan_info br_info;
2856 	struct netlink_ext_ack *extack;
2857 	int err = NOTIFY_DONE;
2858 	u16 vid;
2859 
2860 	if (!br || !br_vlan_enabled(br))
2861 		return NOTIFY_DONE;
2862 
2863 	extack = netdev_notifier_info_to_extack(&info->info);
2864 	vid = vlan_dev_vlan_id(info->upper_dev);
2865 
2866 	/* br_vlan_get_info() returns -EINVAL or -ENOENT if the device,
2867 	 * respectively the VID, is not found. A return value of 0 means
2868 	 * success, which here counts as a failure for us.
2869 	 */
2870 	err = br_vlan_get_info(br, vid, &br_info);
2871 	if (err == 0) {
2872 		NL_SET_ERR_MSG_MOD(extack,
2873 				   "This VLAN is already configured by the bridge");
2874 		return notifier_from_errno(-EBUSY);
2875 	}
2876 
2877 	return NOTIFY_DONE;
2878 }
2879 
2880 static int
2881 dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
2882 				      struct netdev_notifier_changeupper_info *info)
2883 {
2884 	struct dsa_switch *ds;
2885 	struct dsa_port *dp;
2886 	int err;
2887 
2888 	if (!dsa_slave_dev_check(dev))
2889 		return dsa_prevent_bridging_8021q_upper(dev, info);
2890 
2891 	dp = dsa_slave_to_port(dev);
2892 	ds = dp->ds;
2893 
2894 	if (ds->ops->port_prechangeupper) {
2895 		err = ds->ops->port_prechangeupper(ds, dp->index, info);
2896 		if (err)
2897 			return notifier_from_errno(err);
2898 	}
2899 
2900 	if (is_vlan_dev(info->upper_dev))
2901 		return dsa_slave_check_8021q_upper(dev, info);
2902 
2903 	return NOTIFY_DONE;
2904 }
2905 
2906 /* To be eligible as a DSA master, all of a LAG's lower interfaces must
2907  * themselves be eligible DSA masters. Additionally, all LAG slaves must be
2908  * DSA masters of switches in the same switch tree.
2909  */
2910 static int dsa_lag_master_validate(struct net_device *lag_dev,
2911 				   struct netlink_ext_ack *extack)
2912 {
2913 	struct net_device *lower1, *lower2;
2914 	struct list_head *iter1, *iter2;
2915 
2916 	netdev_for_each_lower_dev(lag_dev, lower1, iter1) {
2917 		netdev_for_each_lower_dev(lag_dev, lower2, iter2) {
2918 			if (!netdev_uses_dsa(lower1) ||
2919 			    !netdev_uses_dsa(lower2)) {
2920 				NL_SET_ERR_MSG_MOD(extack,
2921 						   "All LAG ports must be eligible as DSA masters");
2922 				return notifier_from_errno(-EINVAL);
2923 			}
2924 
2925 			if (lower1 == lower2)
2926 				continue;
2927 
2928 			if (!dsa_port_tree_same(lower1->dsa_ptr,
2929 						lower2->dsa_ptr)) {
2930 				NL_SET_ERR_MSG_MOD(extack,
2931 						   "LAG contains DSA masters of disjoint switch trees");
2932 				return notifier_from_errno(-EINVAL);
2933 			}
2934 		}
2935 	}
2936 
2937 	return NOTIFY_DONE;
2938 }
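/* Design note (illustrative sketch, added commentary): the pairwise loop
 * above is O(n^2) in the number of LAG lowers. Since dsa_port_tree_same()
 * is an equivalence test, a linear variant that compares each lower against
 * the first one would be equivalent:
 *
 *	struct net_device *first = NULL, *lower;
 *	struct list_head *iter;
 *
 *	netdev_for_each_lower_dev(lag_dev, lower, iter) {
 *		if (!netdev_uses_dsa(lower))
 *			return notifier_from_errno(-EINVAL);
 *		if (!first)
 *			first = lower;
 *		else if (!dsa_port_tree_same(first->dsa_ptr, lower->dsa_ptr))
 *			return notifier_from_errno(-EINVAL);
 *	}
 *	return NOTIFY_DONE;
 */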
2939 
2940 static int
2941 dsa_master_prechangeupper_sanity_check(struct net_device *master,
2942 				       struct netdev_notifier_changeupper_info *info)
2943 {
2944 	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
2945 
2946 	if (!netdev_uses_dsa(master))
2947 		return NOTIFY_DONE;
2948 
2949 	if (!info->linking)
2950 		return NOTIFY_DONE;
2951 
2952 	/* Allow DSA switch uppers */
2953 	if (dsa_slave_dev_check(info->upper_dev))
2954 		return NOTIFY_DONE;
2955 
2956 	/* Allow bridge uppers of DSA masters, subject to further
2957 	 * restrictions in dsa_bridge_prechangelower_sanity_check()
2958 	 */
2959 	if (netif_is_bridge_master(info->upper_dev))
2960 		return NOTIFY_DONE;
2961 
2962 	/* Allow LAG uppers, subject to further restrictions in
2963 	 * dsa_lag_master_prechangelower_sanity_check()
2964 	 */
2965 	if (netif_is_lag_master(info->upper_dev))
2966 		return dsa_lag_master_validate(info->upper_dev, extack);
2967 
2968 	NL_SET_ERR_MSG_MOD(extack,
2969 			   "DSA master cannot join unknown upper interfaces");
2970 	return notifier_from_errno(-EBUSY);
2971 }
2972 
2973 static int
2974 dsa_lag_master_prechangelower_sanity_check(struct net_device *dev,
2975 					   struct netdev_notifier_changeupper_info *info)
2976 {
2977 	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
2978 	struct net_device *lag_dev = info->upper_dev;
2979 	struct net_device *lower;
2980 	struct list_head *iter;
2981 
2982 	if (!netdev_uses_dsa(lag_dev) || !netif_is_lag_master(lag_dev))
2983 		return NOTIFY_DONE;
2984 
2985 	if (!info->linking)
2986 		return NOTIFY_DONE;
2987 
2988 	if (!netdev_uses_dsa(dev)) {
2989 		NL_SET_ERR_MSG(extack,
2990 			       "Only DSA masters can join a LAG DSA master");
2991 		return notifier_from_errno(-EINVAL);
2992 	}
2993 
2994 	netdev_for_each_lower_dev(lag_dev, lower, iter) {
2995 		if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) {
2996 			NL_SET_ERR_MSG(extack,
2997 				       "Interface is DSA master for a different switch tree than this LAG");
2998 			return notifier_from_errno(-EINVAL);
2999 		}
3000 
3001 		break;
3002 	}
3003 
3004 	return NOTIFY_DONE;
3005 }
3006 
3007 /* Don't allow bridging of DSA masters, since the bridge layer rx_handler
3008  * prevents the DSA fake ethertype handler from being invoked, so we don't
3009  * get the chance to strip off and parse the DSA switch tag protocol header
3010  * (the bridge layer just returns RX_HANDLER_CONSUMED, stopping RX
3011  * processing for these frames).
3012  * The only case where that would not be an issue is when bridging can already
3013  * be offloaded, such as when the DSA master is itself a DSA or plain switchdev
3014  * port, and is bridged only with other ports from the same hardware device.
3015  */
3016 static int
3017 dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
3018 				       struct netdev_notifier_changeupper_info *info)
3019 {
3020 	struct net_device *br = info->upper_dev;
3021 	struct netlink_ext_ack *extack;
3022 	struct net_device *lower;
3023 	struct list_head *iter;
3024 
3025 	if (!netif_is_bridge_master(br))
3026 		return NOTIFY_DONE;
3027 
3028 	if (!info->linking)
3029 		return NOTIFY_DONE;
3030 
3031 	extack = netdev_notifier_info_to_extack(&info->info);
3032 
3033 	netdev_for_each_lower_dev(br, lower, iter) {
3034 		if (!netdev_uses_dsa(new_lower) && !netdev_uses_dsa(lower))
3035 			continue;
3036 
3037 		if (!netdev_port_same_parent_id(lower, new_lower)) {
3038 			NL_SET_ERR_MSG(extack,
3039 				       "Cannot do software bridging with a DSA master");
3040 			return notifier_from_errno(-EINVAL);
3041 		}
3042 	}
3043 
3044 	return NOTIFY_DONE;
3045 }
3046 
3047 static void dsa_tree_migrate_ports_from_lag_master(struct dsa_switch_tree *dst,
3048 						   struct net_device *lag_dev)
3049 {
3050 	struct net_device *new_master = dsa_tree_find_first_master(dst);
3051 	struct dsa_port *dp;
3052 	int err;
3053 
3054 	dsa_tree_for_each_user_port(dp, dst) {
3055 		if (dsa_port_to_master(dp) != lag_dev)
3056 			continue;
3057 
3058 		err = dsa_slave_change_master(dp->slave, new_master, NULL);
3059 		if (err) {
3060 			netdev_err(dp->slave,
3061 				   "failed to restore master to %s: %pe\n",
3062 				   new_master->name, ERR_PTR(err));
3063 		}
3064 	}
3065 }
3066 
3067 static int dsa_master_lag_join(struct net_device *master,
3068 			       struct net_device *lag_dev,
3069 			       struct netdev_lag_upper_info *uinfo,
3070 			       struct netlink_ext_ack *extack)
3071 {
3072 	struct dsa_port *cpu_dp = master->dsa_ptr;
3073 	struct dsa_switch_tree *dst = cpu_dp->dst;
3074 	struct dsa_port *dp;
3075 	int err;
3076 
3077 	err = dsa_master_lag_setup(lag_dev, cpu_dp, uinfo, extack);
3078 	if (err)
3079 		return err;
3080 
3081 	dsa_tree_for_each_user_port(dp, dst) {
3082 		if (dsa_port_to_master(dp) != master)
3083 			continue;
3084 
3085 		err = dsa_slave_change_master(dp->slave, lag_dev, extack);
3086 		if (err)
3087 			goto restore;
3088 	}
3089 
3090 	return 0;
3091 
3092 restore:
3093 	dsa_tree_for_each_user_port_continue_reverse(dp, dst) {
3094 		if (dsa_port_to_master(dp) != lag_dev)
3095 			continue;
3096 
3097 		err = dsa_slave_change_master(dp->slave, master, NULL);
3098 		if (err) {
3099 			netdev_err(dp->slave,
3100 				   "failed to restore master to %s: %pe\n",
3101 				   master->name, ERR_PTR(err));
3102 		}
3103 	}
3104 
3105 	dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
3106 
3107 	return err;
3108 }
3109 
3110 static void dsa_master_lag_leave(struct net_device *master,
3111 				 struct net_device *lag_dev)
3112 {
3113 	struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr;
3114 	struct dsa_switch_tree *dst = cpu_dp->dst;
3115 	struct dsa_port *new_cpu_dp = NULL;
3116 	struct net_device *lower;
3117 	struct list_head *iter;
3118 
3119 	netdev_for_each_lower_dev(lag_dev, lower, iter) {
3120 		if (netdev_uses_dsa(lower)) {
3121 			new_cpu_dp = lower->dsa_ptr;
3122 			break;
3123 		}
3124 	}
3125 
3126 	if (new_cpu_dp) {
3127 		/* Update the CPU port of the user ports still under the LAG
3128 		 * so that dsa_port_to_master() continues to work properly
3129 		 */
3130 		dsa_tree_for_each_user_port(dp, dst)
3131 			if (dsa_port_to_master(dp) == lag_dev)
3132 				dp->cpu_dp = new_cpu_dp;
3133 
3134 		/* Repoint the LAG's virtual CPU port (lag_dev->dsa_ptr) at a
3135 		 * remaining physical CPU port
3136 		 */
3137 		lag_dev->dsa_ptr = new_cpu_dp;
3138 		wmb();
3139 	} else {
3140 		/* If the LAG DSA master has no ports left, migrate back all
3141 		 * user ports to the first physical CPU port
3142 		 */
3143 		dsa_tree_migrate_ports_from_lag_master(dst, lag_dev);
3144 	}
3145 
3146 	/* This DSA master has left its LAG in any case, so let
3147 	 * the CPU port leave the hardware LAG as well
3148 	 */
3149 	dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
3150 }
3151 
3152 static int dsa_master_changeupper(struct net_device *dev,
3153 				  struct netdev_notifier_changeupper_info *info)
3154 {
3155 	struct netlink_ext_ack *extack;
3156 	int err = NOTIFY_DONE;
3157 
3158 	if (!netdev_uses_dsa(dev))
3159 		return err;
3160 
3161 	extack = netdev_notifier_info_to_extack(&info->info);
3162 
3163 	if (netif_is_lag_master(info->upper_dev)) {
3164 		if (info->linking) {
3165 			err = dsa_master_lag_join(dev, info->upper_dev,
3166 						  info->upper_info, extack);
3167 			err = notifier_from_errno(err);
3168 		} else {
3169 			dsa_master_lag_leave(dev, info->upper_dev);
3170 			err = NOTIFY_OK;
3171 		}
3172 	}
3173 
3174 	return err;
3175 }
3176 
3177 static int dsa_slave_netdevice_event(struct notifier_block *nb,
3178 				     unsigned long event, void *ptr)
3179 {
3180 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3181 
3182 	switch (event) {
3183 	case NETDEV_PRECHANGEUPPER: {
3184 		struct netdev_notifier_changeupper_info *info = ptr;
3185 		int err;
3186 
3187 		err = dsa_slave_prechangeupper_sanity_check(dev, info);
3188 		if (notifier_to_errno(err))
3189 			return err;
3190 
3191 		err = dsa_master_prechangeupper_sanity_check(dev, info);
3192 		if (notifier_to_errno(err))
3193 			return err;
3194 
3195 		err = dsa_lag_master_prechangelower_sanity_check(dev, info);
3196 		if (notifier_to_errno(err))
3197 			return err;
3198 
3199 		err = dsa_bridge_prechangelower_sanity_check(dev, info);
3200 		if (notifier_to_errno(err))
3201 			return err;
3202 
3203 		err = dsa_slave_prechangeupper(dev, ptr);
3204 		if (notifier_to_errno(err))
3205 			return err;
3206 
3207 		err = dsa_slave_lag_prechangeupper(dev, ptr);
3208 		if (notifier_to_errno(err))
3209 			return err;
3210 
3211 		break;
3212 	}
3213 	case NETDEV_CHANGEUPPER: {
3214 		int err;
3215 
3216 		err = dsa_slave_changeupper(dev, ptr);
3217 		if (notifier_to_errno(err))
3218 			return err;
3219 
3220 		err = dsa_slave_lag_changeupper(dev, ptr);
3221 		if (notifier_to_errno(err))
3222 			return err;
3223 
3224 		err = dsa_master_changeupper(dev, ptr);
3225 		if (notifier_to_errno(err))
3226 			return err;
3227 
3228 		break;
3229 	}
3230 	case NETDEV_CHANGELOWERSTATE: {
3231 		struct netdev_notifier_changelowerstate_info *info = ptr;
3232 		struct dsa_port *dp;
3233 		int err = 0;
3234 
3235 		if (dsa_slave_dev_check(dev)) {
3236 			dp = dsa_slave_to_port(dev);
3237 
3238 			err = dsa_port_lag_change(dp, info->lower_state_info);
3239 		}
3240 
3241 		/* Mirror LAG port events on DSA masters that are in
3242 		 * a LAG towards their respective switch CPU ports
3243 		 */
3244 		if (netdev_uses_dsa(dev)) {
3245 			dp = dev->dsa_ptr;
3246 
3247 			err = dsa_port_lag_change(dp, info->lower_state_info);
3248 		}
3249 
3250 		return notifier_from_errno(err);
3251 	}
3252 	case NETDEV_CHANGE:
3253 	case NETDEV_UP: {
3254 		/* Track the state of the master port.
3255 		 * The DSA driver may require the master port (and, indirectly,
3256 		 * the tagger) to be available for some special operations.
3257 		 */
3258 		if (netdev_uses_dsa(dev)) {
3259 			struct dsa_port *cpu_dp = dev->dsa_ptr;
3260 			struct dsa_switch_tree *dst = cpu_dp->ds->dst;
3261 
3262 			/* Track when the master port is UP */
3263 			dsa_tree_master_oper_state_change(dst, dev,
3264 							  netif_oper_up(dev));
3265 
3266 			/* Track when the master port is ready to accept
3267 			 * packets.
3268 			 * A NETDEV_UP event is not enough to flag a port as
3269 			 * ready; we also have to wait for linkwatch_do_dev()
3270 			 * to call dev_activate() and emit a NETDEV_CHANGE
3271 			 * event. We consider a master port ready when the dev
3272 			 * has a qdisc assigned that is not the noop qdisc.
3273 			 */
3274 			dsa_tree_master_admin_state_change(dst, dev,
3275 							   !qdisc_tx_is_noop(dev));
3276 
3277 			return NOTIFY_OK;
3278 		}
3279 
3280 		return NOTIFY_DONE;
3281 	}
3282 	case NETDEV_GOING_DOWN: {
3283 		struct dsa_port *dp, *cpu_dp;
3284 		struct dsa_switch_tree *dst;
3285 		LIST_HEAD(close_list);
3286 
3287 		if (!netdev_uses_dsa(dev))
3288 			return NOTIFY_DONE;
3289 
3290 		cpu_dp = dev->dsa_ptr;
3291 		dst = cpu_dp->ds->dst;
3292 
3293 		dsa_tree_master_admin_state_change(dst, dev, false);
3294 
3295 		list_for_each_entry(dp, &dst->ports, list) {
3296 			if (!dsa_port_is_user(dp))
3297 				continue;
3298 
3299 			if (dp->cpu_dp != cpu_dp)
3300 				continue;
3301 
3302 			list_add(&dp->slave->close_list, &close_list);
3303 		}
3304 
3305 		dev_close_many(&close_list, true);
3306 
3307 		return NOTIFY_OK;
3308 	}
3309 	default:
3310 		break;
3311 	}
3312 
3313 	return NOTIFY_DONE;
3314 }
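/* Background (added commentary): notifier_from_errno() folds a negative
 * errno into a NOTIFY_STOP_MASK-tagged return value and notifier_to_errno()
 * recovers it, which is what lets the sanity checks above be chained with
 * early returns:
 *
 *	int v = notifier_from_errno(-EBUSY);
 *	notifier_to_errno(v);		// -EBUSY again
 *	notifier_to_errno(NOTIFY_DONE);	// 0: no error, keep going
 */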
3315 
3316 static void
3317 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
3318 {
3319 	struct switchdev_notifier_fdb_info info = {};
3320 
3321 	info.addr = switchdev_work->addr;
3322 	info.vid = switchdev_work->vid;
3323 	info.offloaded = true;
3324 	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
3325 				 switchdev_work->orig_dev, &info.info, NULL);
3326 }
3327 
3328 static void dsa_slave_switchdev_event_work(struct work_struct *work)
3329 {
3330 	struct dsa_switchdev_event_work *switchdev_work =
3331 		container_of(work, struct dsa_switchdev_event_work, work);
3332 	const unsigned char *addr = switchdev_work->addr;
3333 	struct net_device *dev = switchdev_work->dev;
3334 	u16 vid = switchdev_work->vid;
3335 	struct dsa_switch *ds;
3336 	struct dsa_port *dp;
3337 	int err;
3338 
3339 	dp = dsa_slave_to_port(dev);
3340 	ds = dp->ds;
3341 
3342 	switch (switchdev_work->event) {
3343 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
3344 		if (switchdev_work->host_addr)
3345 			err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
3346 		else if (dp->lag)
3347 			err = dsa_port_lag_fdb_add(dp, addr, vid);
3348 		else
3349 			err = dsa_port_fdb_add(dp, addr, vid);
3350 		if (err) {
3351 			dev_err(ds->dev,
3352 				"port %d failed to add %pM vid %d to fdb: %d\n",
3353 				dp->index, addr, vid, err);
3354 			break;
3355 		}
3356 		dsa_fdb_offload_notify(switchdev_work);
3357 		break;
3358 
3359 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
3360 		if (switchdev_work->host_addr)
3361 			err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
3362 		else if (dp->lag)
3363 			err = dsa_port_lag_fdb_del(dp, addr, vid);
3364 		else
3365 			err = dsa_port_fdb_del(dp, addr, vid);
3366 		if (err) {
3367 			dev_err(ds->dev,
3368 				"port %d failed to delete %pM vid %d from fdb: %d\n",
3369 				dp->index, addr, vid, err);
3370 		}
3371 
3372 		break;
3373 	}
3374 
3375 	kfree(switchdev_work);
3376 }
3377 
3378 static bool dsa_foreign_dev_check(const struct net_device *dev,
3379 				  const struct net_device *foreign_dev)
3380 {
3381 	const struct dsa_port *dp = dsa_slave_to_port(dev);
3382 	struct dsa_switch_tree *dst = dp->ds->dst;
3383 
3384 	if (netif_is_bridge_master(foreign_dev))
3385 		return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);
3386 
3387 	if (netif_is_bridge_port(foreign_dev))
3388 		return !dsa_tree_offloads_bridge_port(dst, foreign_dev);
3389 
3390 	/* Everything else is foreign */
3391 	return true;
3392 }
3393 
3394 static int dsa_slave_fdb_event(struct net_device *dev,
3395 			       struct net_device *orig_dev,
3396 			       unsigned long event, const void *ctx,
3397 			       const struct switchdev_notifier_fdb_info *fdb_info)
3398 {
3399 	struct dsa_switchdev_event_work *switchdev_work;
3400 	struct dsa_port *dp = dsa_slave_to_port(dev);
3401 	bool host_addr = fdb_info->is_local;
3402 	struct dsa_switch *ds = dp->ds;
3403 
3404 	if (ctx && ctx != dp)
3405 		return 0;
3406 
3407 	if (!dp->bridge)
3408 		return 0;
3409 
3410 	if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
3411 		if (dsa_port_offloads_bridge_port(dp, orig_dev))
3412 			return 0;
3413 
3414 		/* FDB entries learned by the software bridge or by foreign
3415 		 * bridge ports should be installed as host addresses only if
3416 		 * the driver requests assisted learning.
3417 		 */
3418 		if (!ds->assisted_learning_on_cpu_port)
3419 			return 0;
3420 	}
3421 
3422 	/* Also treat FDB entries on foreign interfaces bridged with us as host
3423 	 * addresses.
3424 	 */
3425 	if (dsa_foreign_dev_check(dev, orig_dev))
3426 		host_addr = true;
3427 
3428 	/* Check early that we're not doing work in vain.
3429 	 * Host addresses on LAG ports still require regular FDB ops,
3430 	 * since the CPU port isn't in a LAG.
3431 	 */
3432 	if (dp->lag && !host_addr) {
3433 		if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
3434 			return -EOPNOTSUPP;
3435 	} else {
3436 		if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
3437 			return -EOPNOTSUPP;
3438 	}
3439 
3440 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
3441 	if (!switchdev_work)
3442 		return -ENOMEM;
3443 
3444 	netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n",
3445 		   event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting",
3446 		   orig_dev->name, fdb_info->addr, fdb_info->vid,
3447 		   host_addr ? " as host address" : "");
3448 
3449 	INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work);
3450 	switchdev_work->event = event;
3451 	switchdev_work->dev = dev;
3452 	switchdev_work->orig_dev = orig_dev;
3453 
3454 	ether_addr_copy(switchdev_work->addr, fdb_info->addr);
3455 	switchdev_work->vid = fdb_info->vid;
3456 	switchdev_work->host_addr = host_addr;
3457 
3458 	dsa_schedule_work(&switchdev_work->work);
3459 
3460 	return 0;
3461 }
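/* Pattern note (illustrative sketch, added commentary): switchdev FDB
 * notifiers run in atomic context (under rcu_read_lock()), while the driver
 * ->port_fdb_add/del operations may sleep, so the event is copied into a
 * work item with GFP_ATOMIC and replayed from process context. The bare
 * idiom:
 *
 *	struct deferred_fdb {
 *		struct work_struct work;
 *		unsigned char addr[ETH_ALEN];
 *		u16 vid;
 *	};
 *
 *	static void deferred_fdb_fn(struct work_struct *work)
 *	{
 *		struct deferred_fdb *d =
 *			container_of(work, struct deferred_fdb, work);
 *
 *		// sleeping hardware access is safe here
 *		kfree(d);
 *	}
 */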
3462 
3463 /* Called under rcu_read_lock() */
3464 static int dsa_slave_switchdev_event(struct notifier_block *unused,
3465 				     unsigned long event, void *ptr)
3466 {
3467 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3468 	int err;
3469 
3470 	switch (event) {
3471 	case SWITCHDEV_PORT_ATTR_SET:
3472 		err = switchdev_handle_port_attr_set(dev, ptr,
3473 						     dsa_slave_dev_check,
3474 						     dsa_slave_port_attr_set);
3475 		return notifier_from_errno(err);
3476 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
3477 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
3478 		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
3479 							   dsa_slave_dev_check,
3480 							   dsa_foreign_dev_check,
3481 							   dsa_slave_fdb_event);
3482 		return notifier_from_errno(err);
3483 	default:
3484 		return NOTIFY_DONE;
3485 	}
3486 
3487 	return NOTIFY_OK;
3488 }
3489 
3490 static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
3491 					      unsigned long event, void *ptr)
3492 {
3493 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3494 	int err;
3495 
3496 	switch (event) {
3497 	case SWITCHDEV_PORT_OBJ_ADD:
3498 		err = switchdev_handle_port_obj_add_foreign(dev, ptr,
3499 							    dsa_slave_dev_check,
3500 							    dsa_foreign_dev_check,
3501 							    dsa_slave_port_obj_add);
3502 		return notifier_from_errno(err);
3503 	case SWITCHDEV_PORT_OBJ_DEL:
3504 		err = switchdev_handle_port_obj_del_foreign(dev, ptr,
3505 							    dsa_slave_dev_check,
3506 							    dsa_foreign_dev_check,
3507 							    dsa_slave_port_obj_del);
3508 		return notifier_from_errno(err);
3509 	case SWITCHDEV_PORT_ATTR_SET:
3510 		err = switchdev_handle_port_attr_set(dev, ptr,
3511 						     dsa_slave_dev_check,
3512 						     dsa_slave_port_attr_set);
3513 		return notifier_from_errno(err);
3514 	}
3515 
3516 	return NOTIFY_DONE;
3517 }
3518 
3519 static struct notifier_block dsa_slave_nb __read_mostly = {
3520 	.notifier_call  = dsa_slave_netdevice_event,
3521 };
3522 
3523 struct notifier_block dsa_slave_switchdev_notifier = {
3524 	.notifier_call = dsa_slave_switchdev_event,
3525 };
3526 
3527 struct notifier_block dsa_slave_switchdev_blocking_notifier = {
3528 	.notifier_call = dsa_slave_switchdev_blocking_event,
3529 };
3530 
3531 int dsa_slave_register_notifier(void)
3532 {
3533 	struct notifier_block *nb;
3534 	int err;
3535 
3536 	err = register_netdevice_notifier(&dsa_slave_nb);
3537 	if (err)
3538 		return err;
3539 
3540 	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
3541 	if (err)
3542 		goto err_switchdev_nb;
3543 
3544 	nb = &dsa_slave_switchdev_blocking_notifier;
3545 	err = register_switchdev_blocking_notifier(nb);
3546 	if (err)
3547 		goto err_switchdev_blocking_nb;
3548 
3549 	return 0;
3550 
3551 err_switchdev_blocking_nb:
3552 	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
3553 err_switchdev_nb:
3554 	unregister_netdevice_notifier(&dsa_slave_nb);
3555 	return err;
3556 }
3557 
3558 void dsa_slave_unregister_notifier(void)
3559 {
3560 	struct notifier_block *nb;
3561 	int err;
3562 
3563 	nb = &dsa_slave_switchdev_blocking_notifier;
3564 	err = unregister_switchdev_blocking_notifier(nb);
3565 	if (err)
3566 		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
3567 
3568 	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
3569 	if (err)
3570 		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
3571 
3572 	err = unregister_netdevice_notifier(&dsa_slave_nb);
3573 	if (err)
3574 		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
3575 }
3576