xref: /openbmc/linux/net/dsa/slave.c (revision c0d3b831)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/dsa/slave.c - Slave device handling
4  * Copyright (c) 2008-2009 Marvell Semiconductor
5  */
6 
7 #include <linux/list.h>
8 #include <linux/etherdevice.h>
9 #include <linux/netdevice.h>
10 #include <linux/phy.h>
11 #include <linux/phy_fixed.h>
12 #include <linux/phylink.h>
13 #include <linux/of_net.h>
14 #include <linux/of_mdio.h>
15 #include <linux/mdio.h>
16 #include <net/rtnetlink.h>
17 #include <net/pkt_cls.h>
18 #include <net/selftests.h>
19 #include <net/tc_act/tc_mirred.h>
20 #include <linux/if_bridge.h>
21 #include <linux/if_hsr.h>
22 #include <net/dcbnl.h>
23 #include <linux/netpoll.h>
24 
25 #include "dsa.h"
26 #include "port.h"
27 #include "master.h"
28 #include "netlink.h"
29 #include "slave.h"
30 #include "tag.h"
31 
/* Deferred-work context for switchdev FDB notifications, which arrive
 * in atomic context; the hardware programming is done later in process
 * context.
 */
struct dsa_switchdev_event_work {
	struct net_device *dev;		/* slave netdev the event targets */
	struct net_device *orig_dev;	/* device the notification originated on */
	struct work_struct work;
	unsigned long event;		/* SWITCHDEV_FDB_{ADD,DEL}_TO_DEVICE */
	/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
	 * SWITCHDEV_FDB_DEL_TO_DEVICE
	 */
	unsigned char addr[ETH_ALEN];	/* MAC address of the FDB entry */
	u16 vid;			/* VLAN ID of the FDB entry */
	/* NOTE(review): presumably marks entries terminated on the CPU
	 * port — confirm in the switchdev handlers (not in this chunk).
	 */
	bool host_addr;
};
44 
/* Address-list updates handled by dsa_slave_standalone_event_work()
 * for ports operating in standalone (non-bridged) mode.
 */
enum dsa_standalone_event {
	DSA_UC_ADD,	/* add unicast address to the host FDB */
	DSA_UC_DEL,	/* delete unicast address from the host FDB */
	DSA_MC_ADD,	/* add multicast address to the host MDB */
	DSA_MC_DEL,	/* delete multicast address from the host MDB */
};
51 
/* One deferred standalone address update; allocated by
 * dsa_slave_schedule_standalone_work() and freed by
 * dsa_slave_standalone_event_work() once processed.
 */
struct dsa_standalone_event_work {
	struct work_struct work;
	struct net_device *dev;		/* slave netdev owning the address */
	enum dsa_standalone_event event;
	unsigned char addr[ETH_ALEN];	/* MAC address to add/delete */
	u16 vid;			/* VLAN ID for the entry */
};
59 
60 static bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
61 {
62 	return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
63 	       ds->fdb_isolation && !ds->vlan_filtering_is_global &&
64 	       !ds->needs_standalone_vlan_filtering;
65 }
66 
67 static bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds)
68 {
69 	return ds->ops->port_mdb_add && ds->ops->port_mdb_del &&
70 	       ds->fdb_isolation && !ds->vlan_filtering_is_global &&
71 	       !ds->needs_standalone_vlan_filtering;
72 }
73 
74 static void dsa_slave_standalone_event_work(struct work_struct *work)
75 {
76 	struct dsa_standalone_event_work *standalone_work =
77 		container_of(work, struct dsa_standalone_event_work, work);
78 	const unsigned char *addr = standalone_work->addr;
79 	struct net_device *dev = standalone_work->dev;
80 	struct dsa_port *dp = dsa_slave_to_port(dev);
81 	struct switchdev_obj_port_mdb mdb;
82 	struct dsa_switch *ds = dp->ds;
83 	u16 vid = standalone_work->vid;
84 	int err;
85 
86 	switch (standalone_work->event) {
87 	case DSA_UC_ADD:
88 		err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
89 		if (err) {
90 			dev_err(ds->dev,
91 				"port %d failed to add %pM vid %d to fdb: %d\n",
92 				dp->index, addr, vid, err);
93 			break;
94 		}
95 		break;
96 
97 	case DSA_UC_DEL:
98 		err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
99 		if (err) {
100 			dev_err(ds->dev,
101 				"port %d failed to delete %pM vid %d from fdb: %d\n",
102 				dp->index, addr, vid, err);
103 		}
104 
105 		break;
106 	case DSA_MC_ADD:
107 		ether_addr_copy(mdb.addr, addr);
108 		mdb.vid = vid;
109 
110 		err = dsa_port_standalone_host_mdb_add(dp, &mdb);
111 		if (err) {
112 			dev_err(ds->dev,
113 				"port %d failed to add %pM vid %d to mdb: %d\n",
114 				dp->index, addr, vid, err);
115 			break;
116 		}
117 		break;
118 	case DSA_MC_DEL:
119 		ether_addr_copy(mdb.addr, addr);
120 		mdb.vid = vid;
121 
122 		err = dsa_port_standalone_host_mdb_del(dp, &mdb);
123 		if (err) {
124 			dev_err(ds->dev,
125 				"port %d failed to delete %pM vid %d from mdb: %d\n",
126 				dp->index, addr, vid, err);
127 		}
128 
129 		break;
130 	}
131 
132 	kfree(standalone_work);
133 }
134 
135 static int dsa_slave_schedule_standalone_work(struct net_device *dev,
136 					      enum dsa_standalone_event event,
137 					      const unsigned char *addr,
138 					      u16 vid)
139 {
140 	struct dsa_standalone_event_work *standalone_work;
141 
142 	standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
143 	if (!standalone_work)
144 		return -ENOMEM;
145 
146 	INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
147 	standalone_work->event = event;
148 	standalone_work->dev = dev;
149 
150 	ether_addr_copy(standalone_work->addr, addr);
151 	standalone_work->vid = vid;
152 
153 	dsa_schedule_work(&standalone_work->work);
154 
155 	return 0;
156 }
157 
158 static int dsa_slave_sync_uc(struct net_device *dev,
159 			     const unsigned char *addr)
160 {
161 	struct net_device *master = dsa_slave_to_master(dev);
162 	struct dsa_port *dp = dsa_slave_to_port(dev);
163 
164 	dev_uc_add(master, addr);
165 
166 	if (!dsa_switch_supports_uc_filtering(dp->ds))
167 		return 0;
168 
169 	return dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
170 }
171 
172 static int dsa_slave_unsync_uc(struct net_device *dev,
173 			       const unsigned char *addr)
174 {
175 	struct net_device *master = dsa_slave_to_master(dev);
176 	struct dsa_port *dp = dsa_slave_to_port(dev);
177 
178 	dev_uc_del(master, addr);
179 
180 	if (!dsa_switch_supports_uc_filtering(dp->ds))
181 		return 0;
182 
183 	return dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
184 }
185 
186 static int dsa_slave_sync_mc(struct net_device *dev,
187 			     const unsigned char *addr)
188 {
189 	struct net_device *master = dsa_slave_to_master(dev);
190 	struct dsa_port *dp = dsa_slave_to_port(dev);
191 
192 	dev_mc_add(master, addr);
193 
194 	if (!dsa_switch_supports_mc_filtering(dp->ds))
195 		return 0;
196 
197 	return dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
198 }
199 
200 static int dsa_slave_unsync_mc(struct net_device *dev,
201 			       const unsigned char *addr)
202 {
203 	struct net_device *master = dsa_slave_to_master(dev);
204 	struct dsa_port *dp = dsa_slave_to_port(dev);
205 
206 	dev_mc_del(master, addr);
207 
208 	if (!dsa_switch_supports_mc_filtering(dp->ds))
209 		return 0;
210 
211 	return dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
212 }
213 
/* Replay the already-synced MC and UC address lists of @dev towards
 * the master and, where supported, the switch's host tables. Waits
 * for the deferred work items to finish before returning.
 */
void dsa_slave_sync_ha(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct netdev_hw_addr *ha;

	/* The address lists are protected by the netdev addr list lock */
	netif_addr_lock_bh(dev);

	netdev_for_each_synced_mc_addr(ha, dev)
		dsa_slave_sync_mc(dev, ha->addr);

	netdev_for_each_synced_uc_addr(ha, dev)
		dsa_slave_sync_uc(dev, ha->addr);

	netif_addr_unlock_bh(dev);

	/* The sync helpers only schedule work; flush it here, outside
	 * the atomic section.
	 */
	if (dsa_switch_supports_uc_filtering(ds) ||
	    dsa_switch_supports_mc_filtering(ds))
		dsa_flush_workqueue();
}
234 
/* Withdraw the synced UC and MC address lists of @dev from the master
 * and, where supported, the switch's host tables. Waits for the
 * deferred work items to finish before returning.
 */
void dsa_slave_unsync_ha(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct netdev_hw_addr *ha;

	/* The address lists are protected by the netdev addr list lock */
	netif_addr_lock_bh(dev);

	netdev_for_each_synced_uc_addr(ha, dev)
		dsa_slave_unsync_uc(dev, ha->addr);

	netdev_for_each_synced_mc_addr(ha, dev)
		dsa_slave_unsync_mc(dev, ha->addr);

	netif_addr_unlock_bh(dev);

	/* The unsync helpers only schedule work; flush it here, outside
	 * the atomic section.
	 */
	if (dsa_switch_supports_uc_filtering(ds) ||
	    dsa_switch_supports_mc_filtering(ds))
		dsa_flush_workqueue();
}
255 
256 /* slave mii_bus handling ***************************************************/
257 static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
258 {
259 	struct dsa_switch *ds = bus->priv;
260 
261 	if (ds->phys_mii_mask & (1 << addr))
262 		return ds->ops->phy_read(ds, addr, reg);
263 
264 	return 0xffff;
265 }
266 
267 static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
268 {
269 	struct dsa_switch *ds = bus->priv;
270 
271 	if (ds->phys_mii_mask & (1 << addr))
272 		return ds->ops->phy_write(ds, addr, reg, val);
273 
274 	return 0;
275 }
276 
277 void dsa_slave_mii_bus_init(struct dsa_switch *ds)
278 {
279 	ds->slave_mii_bus->priv = (void *)ds;
280 	ds->slave_mii_bus->name = "dsa slave smi";
281 	ds->slave_mii_bus->read = dsa_slave_phy_read;
282 	ds->slave_mii_bus->write = dsa_slave_phy_write;
283 	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
284 		 ds->dst->index, ds->index);
285 	ds->slave_mii_bus->parent = ds->dev;
286 	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
287 }
288 
289 
290 /* slave device handling ****************************************************/
291 static int dsa_slave_get_iflink(const struct net_device *dev)
292 {
293 	return dsa_slave_to_master(dev)->ifindex;
294 }
295 
/* ndo_open: bring up the master, install our MAC address (host FDB
 * and/or the master's UC list) and enable the port. Each step is
 * undone in reverse order on failure.
 */
static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dev_open(master, NULL);
	if (err < 0) {
		netdev_err(dev, "failed to open master %s\n", master->name);
		goto out;
	}

	/* Make the switch trap frames for our own MAC towards the CPU */
	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
		if (err)
			goto out;
	}

	/* If our address differs from the master's, the master must
	 * also accept it on our behalf.
	 */
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto del_host_addr;
	}

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto del_unicast;

	return 0;

del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
del_host_addr:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
out:
	return err;
}
336 
/* ndo_stop: tear down in the reverse order of dsa_slave_open() —
 * disable the port, drop our address from the master's UC list, then
 * remove it from the host FDB.
 */
static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	dsa_port_disable_rt(dp);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

	return 0;
}
353 
354 static void dsa_slave_manage_host_flood(struct net_device *dev)
355 {
356 	bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
357 	struct dsa_port *dp = dsa_slave_to_port(dev);
358 	bool uc = dev->flags & IFF_PROMISC;
359 
360 	dsa_port_set_host_flood(dp, uc, mc);
361 }
362 
/* ndo_change_rx_flags: propagate IFF_ALLMULTI/IFF_PROMISC changes as
 * reference-counted updates on the master and, when the switch filters
 * in hardware, adjust the CPU flooding policy too.
 */
static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(master,
				 dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(master,
				    dev->flags & IFF_PROMISC ? 1 : -1);

	if (dsa_switch_supports_uc_filtering(ds) &&
	    dsa_switch_supports_mc_filtering(ds))
		dsa_slave_manage_host_flood(dev);
}
380 
/* ndo_set_rx_mode: mirror the slave's MC and UC address lists onto the
 * master (and into the switch's host tables) via the sync/unsync
 * helpers.
 */
static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	__dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
	__dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
}
386 
/* ndo_set_mac_address: install the new address (host FDB, then the
 * master's UC list) before removing the old one, so the port keeps
 * receiving throughout the change.
 */
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* If the port is down, the address isn't synced yet to hardware or
	 * to the DSA master, so there is nothing to change.
	 */
	if (!(dev->flags & IFF_UP))
		goto out_change_dev_addr;

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
		if (err)
			return err;
	}

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			goto del_unicast;
	}

	/* New address installed; now retire the old one */
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

out_change_dev_addr:
	eth_hw_addr_set(dev, addr->sa_data);

	return 0;

del_unicast:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);

	return err;
}
433 
/* State threaded through dsa_slave_port_fdb_do_dump() while dumping
 * the hardware FDB into a netlink reply.
 */
struct dsa_slave_dump_ctx {
	struct net_device *dev;		/* slave whose FDB is being dumped */
	struct sk_buff *skb;		/* netlink reply under construction */
	struct netlink_callback *cb;	/* dump state (seq, resume args) */
	int idx;			/* entries handled so far (resume point) */
};
440 
/* dsa_port_fdb_dump() callback: emit one RTM_NEWNEIGH message for each
 * FDB entry, skipping entries before the resume index in cb->args[2].
 * Returns 0 or -EMSGSIZE when the reply skb is full.
 */
static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	/* Resume support: skip entries already sent in a previous batch */
	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	/* VID 0 is implicit and not reported */
	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}
484 
485 static int
486 dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
487 		   struct net_device *dev, struct net_device *filter_dev,
488 		   int *idx)
489 {
490 	struct dsa_port *dp = dsa_slave_to_port(dev);
491 	struct dsa_slave_dump_ctx dump = {
492 		.dev = dev,
493 		.skb = skb,
494 		.cb = cb,
495 		.idx = *idx,
496 	};
497 	int err;
498 
499 	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
500 	*idx = dump.idx;
501 
502 	return err;
503 }
504 
505 static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
506 {
507 	struct dsa_slave_priv *p = netdev_priv(dev);
508 	struct dsa_switch *ds = p->dp->ds;
509 	int port = p->dp->index;
510 
511 	/* Pass through to switch driver if it supports timestamping */
512 	switch (cmd) {
513 	case SIOCGHWTSTAMP:
514 		if (ds->ops->port_hwtstamp_get)
515 			return ds->ops->port_hwtstamp_get(ds, port, ifr);
516 		break;
517 	case SIOCSHWTSTAMP:
518 		if (ds->ops->port_hwtstamp_set)
519 			return ds->ops->port_hwtstamp_set(ds, port, ifr);
520 		break;
521 	}
522 
523 	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
524 }
525 
/* switchdev attribute handler: dispatch bridge/port attribute changes
 * to the matching dsa_port_* operation, after verifying that the
 * originating device is actually offloaded by this port.
 */
static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
				   const struct switchdev_attr *attr,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	/* A non-NULL ctx restricts the notification to one port */
	if (ctx && ctx != dp)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_state(dp, attr->u.stp_state, true);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MST_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MST:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_mst_enable(dp, attr->u.mst, extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_VLAN_MSTI:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
594 
595 /* Must be called under rcu_read_lock() */
596 static int
597 dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
598 				      const struct switchdev_obj_port_vlan *vlan)
599 {
600 	struct net_device *upper_dev;
601 	struct list_head *iter;
602 
603 	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
604 		u16 vid;
605 
606 		if (!is_vlan_dev(upper_dev))
607 			continue;
608 
609 		vid = vlan_dev_vlan_id(upper_dev);
610 		if (vid == vlan->vid)
611 			return -EBUSY;
612 	}
613 
614 	return 0;
615 }
616 
/* Install a bridge VLAN on a user port, unless the port skips VLAN
 * configuration or an 802.1Q upper already uses the VID.
 */
static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
	 * the same VID.
	 */
	if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
		rcu_read_lock();
		err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
		rcu_read_unlock();
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Port already has a VLAN upper with this VID");
			return err;
		}
	}

	return dsa_port_vlan_add(dp, vlan, extack);
}
648 
/* Offload a VLAN installed on the bridge or on a foreign interface by
 * installing it as a VLAN towards the CPU port.
 */
static int dsa_slave_host_vlan_add(struct net_device *dev,
				   const struct switchdev_obj *obj,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	/* Work on a local copy so the flag adjustment below does not
	 * modify the caller's object.
	 */
	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	return dsa_port_host_vlan_add(dp, &vlan, extack);
}
677 
/* switchdev object add handler: dispatch MDB, VLAN and MRP additions
 * to the dsa_port layer, checking first that the object targets
 * something this port offloads.
 */
static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	/* A non-NULL ctx restricts the notification to one port */
	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		/* VLANs on the port itself vs. on the bridge/foreign
		 * interface take different install paths.
		 */
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_add(dev, obj, extack);
		else
			err = dsa_slave_host_vlan_add(dev, obj, extack);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
727 
/* Remove a bridge VLAN from a user port, unless the port skips VLAN
 * configuration entirely.
 */
static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	return dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
}
741 
742 static int dsa_slave_host_vlan_del(struct net_device *dev,
743 				   const struct switchdev_obj *obj)
744 {
745 	struct dsa_port *dp = dsa_slave_to_port(dev);
746 	struct switchdev_obj_port_vlan *vlan;
747 
748 	/* Do nothing if this is a software bridge */
749 	if (!dp->bridge)
750 		return -EOPNOTSUPP;
751 
752 	if (dsa_port_skip_vlan_configuration(dp))
753 		return 0;
754 
755 	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
756 
757 	return dsa_port_host_vlan_del(dp, vlan);
758 }
759 
/* switchdev object delete handler: mirror of dsa_slave_port_obj_add()
 * for MDB, VLAN and MRP object removal.
 */
static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	/* A non-NULL ctx restricts the notification to one port */
	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		/* Port VLANs and host VLANs take different removal paths */
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_del(dev, obj);
		else
			err = dsa_slave_host_vlan_del(dev, obj);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
808 
/* Transmit via netpoll. Callers (dsa_enqueue_skb) only reach this when
 * netpoll_tx_running() is true, so the !CONFIG_NET_POLL_CONTROLLER
 * branch should be unreachable — hence the BUG().
 */
static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}
821 
822 static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
823 				 struct sk_buff *skb)
824 {
825 	struct dsa_switch *ds = p->dp->ds;
826 
827 	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
828 		return;
829 
830 	if (!ds->ops->port_txtstamp)
831 		return;
832 
833 	ds->ops->port_txtstamp(ds, p->dp->index, skb);
834 }
835 
836 netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
837 {
838 	/* SKB for netpoll still need to be mangled with the protocol-specific
839 	 * tag to be successfully transmitted
840 	 */
841 	if (unlikely(netpoll_tx_running(dev)))
842 		return dsa_slave_netpoll_send_skb(dev, skb);
843 
844 	/* Queue the SKB for transmission on the parent interface, but
845 	 * do not modify its EtherType
846 	 */
847 	skb->dev = dsa_slave_to_master(dev);
848 	dev_queue_xmit(skb);
849 
850 	return NETDEV_TX_OK;
851 }
852 EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
853 
/* Make sure @skb has the head/tailroom the tagger requires,
 * reallocating only when necessary. Returns 0 on success or the
 * negative errno from pskb_expand_head().
 */
static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = dev->needed_headroom;
	int needed_tailroom = dev->needed_tailroom;

	/* For tail taggers, we need to pad short frames ourselves, to ensure
	 * that the tail tag does not fail at its role of being at the end of
	 * the packet, once the master interface pads the frame. Account for
	 * that pad length here, and pad later.
	 */
	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
		needed_tailroom += ETH_ZLEN - skb->len;
	/* skb_headroom() returns unsigned int... */
	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);

	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
		/* No reallocation needed, yay! */
		return 0;

	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
				GFP_ATOMIC);
}
877 
/* ndo_start_xmit: account the packet, request a TX timestamp if asked
 * for, make room for the tagger's header/trailer, apply the tag, then
 * queue the tagged frame on the master interface.
 */
static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	/* The tagger owns skb->cb from here on */
	memset(skb->cb, 0, sizeof(skb->cb));

	/* Handle tx timestamp if any */
	dsa_skb_tx_timestamp(p, skb);

	if (dsa_realloc_skb(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * dsa_realloc_skb(), which has also ensured that padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}
912 
913 /* ethtool operations *******************************************************/
914 
915 static void dsa_slave_get_drvinfo(struct net_device *dev,
916 				  struct ethtool_drvinfo *drvinfo)
917 {
918 	strscpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
919 	strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
920 	strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
921 }
922 
923 static int dsa_slave_get_regs_len(struct net_device *dev)
924 {
925 	struct dsa_port *dp = dsa_slave_to_port(dev);
926 	struct dsa_switch *ds = dp->ds;
927 
928 	if (ds->ops->get_regs_len)
929 		return ds->ops->get_regs_len(ds, dp->index);
930 
931 	return -EOPNOTSUPP;
932 }
933 
934 static void
935 dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
936 {
937 	struct dsa_port *dp = dsa_slave_to_port(dev);
938 	struct dsa_switch *ds = dp->ds;
939 
940 	if (ds->ops->get_regs)
941 		ds->ops->get_regs(ds, dp->index, regs, _p);
942 }
943 
944 static int dsa_slave_nway_reset(struct net_device *dev)
945 {
946 	struct dsa_port *dp = dsa_slave_to_port(dev);
947 
948 	return phylink_ethtool_nway_reset(dp->pl);
949 }
950 
951 static int dsa_slave_get_eeprom_len(struct net_device *dev)
952 {
953 	struct dsa_port *dp = dsa_slave_to_port(dev);
954 	struct dsa_switch *ds = dp->ds;
955 
956 	if (ds->cd && ds->cd->eeprom_len)
957 		return ds->cd->eeprom_len;
958 
959 	if (ds->ops->get_eeprom_len)
960 		return ds->ops->get_eeprom_len(ds);
961 
962 	return 0;
963 }
964 
965 static int dsa_slave_get_eeprom(struct net_device *dev,
966 				struct ethtool_eeprom *eeprom, u8 *data)
967 {
968 	struct dsa_port *dp = dsa_slave_to_port(dev);
969 	struct dsa_switch *ds = dp->ds;
970 
971 	if (ds->ops->get_eeprom)
972 		return ds->ops->get_eeprom(ds, eeprom, data);
973 
974 	return -EOPNOTSUPP;
975 }
976 
977 static int dsa_slave_set_eeprom(struct net_device *dev,
978 				struct ethtool_eeprom *eeprom, u8 *data)
979 {
980 	struct dsa_port *dp = dsa_slave_to_port(dev);
981 	struct dsa_switch *ds = dp->ds;
982 
983 	if (ds->ops->set_eeprom)
984 		return ds->ops->set_eeprom(ds, eeprom, data);
985 
986 	return -EOPNOTSUPP;
987 }
988 
/* ethtool string tables: for ETH_SS_STATS, four software counters come
 * first, followed by the switch driver's own strings. Each string
 * occupies a fixed ETH_GSTRING_LEN slot; the strncpy() bound keeps the
 * copies inside their slot (all literals are shorter than the slot, so
 * they stay NUL-terminated).
 */
static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	} else if (stringset ==  ETH_SS_TEST) {
		net_selftest_get_strings(data);
	}

}
1010 
/* ethtool -S values: aggregate the per-CPU software counters into the
 * first four slots (matching dsa_slave_get_strings()), then append the
 * switch driver's hardware statistics.
 */
static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(dev->tstats, i);
		/* Retry loop: re-read if a writer updated the counters
		 * concurrently (u64_stats seqcount protocol).
		 */
		do {
			start = u64_stats_fetch_begin(&s->syncp);
			tx_packets = u64_stats_read(&s->tx_packets);
			tx_bytes = u64_stats_read(&s->tx_bytes);
			rx_packets = u64_stats_read(&s->rx_packets);
			rx_bytes = u64_stats_read(&s->rx_bytes);
		} while (u64_stats_fetch_retry(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}
1040 
1041 static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
1042 {
1043 	struct dsa_port *dp = dsa_slave_to_port(dev);
1044 	struct dsa_switch *ds = dp->ds;
1045 
1046 	if (sset == ETH_SS_STATS) {
1047 		int count = 0;
1048 
1049 		if (ds->ops->get_sset_count) {
1050 			count = ds->ops->get_sset_count(ds, dp->index, sset);
1051 			if (count < 0)
1052 				return count;
1053 		}
1054 
1055 		return count + 4;
1056 	} else if (sset ==  ETH_SS_TEST) {
1057 		return net_selftest_get_count();
1058 	}
1059 
1060 	return -EOPNOTSUPP;
1061 }
1062 
1063 static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
1064 					struct ethtool_eth_phy_stats *phy_stats)
1065 {
1066 	struct dsa_port *dp = dsa_slave_to_port(dev);
1067 	struct dsa_switch *ds = dp->ds;
1068 
1069 	if (ds->ops->get_eth_phy_stats)
1070 		ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
1071 }
1072 
1073 static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
1074 					struct ethtool_eth_mac_stats *mac_stats)
1075 {
1076 	struct dsa_port *dp = dsa_slave_to_port(dev);
1077 	struct dsa_switch *ds = dp->ds;
1078 
1079 	if (ds->ops->get_eth_mac_stats)
1080 		ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
1081 }
1082 
1083 static void
1084 dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
1085 			     struct ethtool_eth_ctrl_stats *ctrl_stats)
1086 {
1087 	struct dsa_port *dp = dsa_slave_to_port(dev);
1088 	struct dsa_switch *ds = dp->ds;
1089 
1090 	if (ds->ops->get_eth_ctrl_stats)
1091 		ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
1092 }
1093 
1094 static void
1095 dsa_slave_get_rmon_stats(struct net_device *dev,
1096 			 struct ethtool_rmon_stats *rmon_stats,
1097 			 const struct ethtool_rmon_hist_range **ranges)
1098 {
1099 	struct dsa_port *dp = dsa_slave_to_port(dev);
1100 	struct dsa_switch *ds = dp->ds;
1101 
1102 	if (ds->ops->get_rmon_stats)
1103 		ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
1104 }
1105 
1106 static void dsa_slave_net_selftest(struct net_device *ndev,
1107 				   struct ethtool_test *etest, u64 *buf)
1108 {
1109 	struct dsa_port *dp = dsa_slave_to_port(ndev);
1110 	struct dsa_switch *ds = dp->ds;
1111 
1112 	if (ds->ops->self_test) {
1113 		ds->ops->self_test(ds, dp->index, etest, buf);
1114 		return;
1115 	}
1116 
1117 	net_selftest(ndev, etest, buf);
1118 }
1119 
1120 static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
1121 {
1122 	struct dsa_port *dp = dsa_slave_to_port(dev);
1123 	struct dsa_switch *ds = dp->ds;
1124 
1125 	phylink_ethtool_get_wol(dp->pl, w);
1126 
1127 	if (ds->ops->get_wol)
1128 		ds->ops->get_wol(ds, dp->index, w);
1129 }
1130 
1131 static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
1132 {
1133 	struct dsa_port *dp = dsa_slave_to_port(dev);
1134 	struct dsa_switch *ds = dp->ds;
1135 	int ret = -EOPNOTSUPP;
1136 
1137 	phylink_ethtool_set_wol(dp->pl, w);
1138 
1139 	if (ds->ops->set_wol)
1140 		ret = ds->ops->set_wol(ds, dp->index, w);
1141 
1142 	return ret;
1143 }
1144 
/* ethtool ->set_eee: configure EEE on the switch MAC first (driver op), and
 * only then on the PHY via phylink, so a driver failure leaves the PHY
 * settings untouched.
 */
static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}
1164 
/* ethtool ->get_eee: read the MAC-side EEE state from the switch driver,
 * then let phylink fill in the PHY-side state.
 */
static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}
1184 
1185 static int dsa_slave_get_link_ksettings(struct net_device *dev,
1186 					struct ethtool_link_ksettings *cmd)
1187 {
1188 	struct dsa_port *dp = dsa_slave_to_port(dev);
1189 
1190 	return phylink_ethtool_ksettings_get(dp->pl, cmd);
1191 }
1192 
1193 static int dsa_slave_set_link_ksettings(struct net_device *dev,
1194 					const struct ethtool_link_ksettings *cmd)
1195 {
1196 	struct dsa_port *dp = dsa_slave_to_port(dev);
1197 
1198 	return phylink_ethtool_ksettings_set(dp->pl, cmd);
1199 }
1200 
1201 static void dsa_slave_get_pause_stats(struct net_device *dev,
1202 				  struct ethtool_pause_stats *pause_stats)
1203 {
1204 	struct dsa_port *dp = dsa_slave_to_port(dev);
1205 	struct dsa_switch *ds = dp->ds;
1206 
1207 	if (ds->ops->get_pause_stats)
1208 		ds->ops->get_pause_stats(ds, dp->index, pause_stats);
1209 }
1210 
1211 static void dsa_slave_get_pauseparam(struct net_device *dev,
1212 				     struct ethtool_pauseparam *pause)
1213 {
1214 	struct dsa_port *dp = dsa_slave_to_port(dev);
1215 
1216 	phylink_ethtool_get_pauseparam(dp->pl, pause);
1217 }
1218 
1219 static int dsa_slave_set_pauseparam(struct net_device *dev,
1220 				    struct ethtool_pauseparam *pause)
1221 {
1222 	struct dsa_port *dp = dsa_slave_to_port(dev);
1223 
1224 	return phylink_ethtool_set_pauseparam(dp->pl, pause);
1225 }
1226 
1227 #ifdef CONFIG_NET_POLL_CONTROLLER
1228 static int dsa_slave_netpoll_setup(struct net_device *dev,
1229 				   struct netpoll_info *ni)
1230 {
1231 	struct net_device *master = dsa_slave_to_master(dev);
1232 	struct dsa_slave_priv *p = netdev_priv(dev);
1233 	struct netpoll *netpoll;
1234 	int err = 0;
1235 
1236 	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
1237 	if (!netpoll)
1238 		return -ENOMEM;
1239 
1240 	err = __netpoll_setup(netpoll, master);
1241 	if (err) {
1242 		kfree(netpoll);
1243 		goto out;
1244 	}
1245 
1246 	p->netpoll = netpoll;
1247 out:
1248 	return err;
1249 }
1250 
1251 static void dsa_slave_netpoll_cleanup(struct net_device *dev)
1252 {
1253 	struct dsa_slave_priv *p = netdev_priv(dev);
1254 	struct netpoll *netpoll = p->netpoll;
1255 
1256 	if (!netpoll)
1257 		return;
1258 
1259 	p->netpoll = NULL;
1260 
1261 	__netpoll_free(netpoll);
1262 }
1263 
/* ndo_poll_controller: intentionally empty. The slave's netpoll is set up on
 * the DSA master (see dsa_slave_netpoll_setup()), so there is nothing to poll
 * on the slave itself.
 */
static void dsa_slave_poll_controller(struct net_device *dev)
{
}
1267 #endif
1268 
1269 static struct dsa_mall_tc_entry *
1270 dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
1271 {
1272 	struct dsa_slave_priv *p = netdev_priv(dev);
1273 	struct dsa_mall_tc_entry *mall_tc_entry;
1274 
1275 	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
1276 		if (mall_tc_entry->cookie == cookie)
1277 			return mall_tc_entry;
1278 
1279 	return NULL;
1280 }
1281 
/* Offload a matchall classifier with a mirred action as port mirroring.
 * The mirror destination must itself be a DSA slave interface; the rule is
 * tracked on the slave's mall_tc_list keyed by the tc cookie.
 */
static int
dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_mirror_tc_entry *mirror;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err;

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	/* Caller guarantees a single action (see dsa_slave_add_cls_matchall) */
	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	/* Can only mirror towards another DSA slave */
	if (!dsa_slave_dev_check(act->dev))
		return -EOPNOTSUPP;

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
	mirror = &mall_tc_entry->mirror;

	to_dp = dsa_slave_to_port(act->dev);

	mirror->to_local_port = to_dp->index;
	mirror->ingress = ingress;

	/* Commit to hardware before exposing the entry on the list */
	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}
1335 
/* Offload a matchall classifier with a police action as a port policer.
 * Only a single policer per port is supported, and only on ingress; the rule
 * is tracked on the slave's mall_tc_list keyed by the tc cookie.
 */
static int
dsa_slave_add_cls_matchall_police(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_policer_tc_entry *policer;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	int err;

	if (!ds->ops->port_policer_add) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Policing offload not implemented");
		return -EOPNOTSUPP;
	}

	if (!ingress) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only supported on ingress qdisc");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	/* Reject a second policer on the same port */
	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one port policer allowed");
			return -EEXIST;
		}
	}

	/* Caller guarantees a single action (see dsa_slave_add_cls_matchall) */
	act = &cls->rule->action.entries[0];

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
	policer = &mall_tc_entry->policer;
	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
	policer->burst = act->police.burst;

	/* Commit to hardware before exposing the entry on the list */
	err = ds->ops->port_policer_add(ds, dp->index, policer);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}
1396 
1397 static int dsa_slave_add_cls_matchall(struct net_device *dev,
1398 				      struct tc_cls_matchall_offload *cls,
1399 				      bool ingress)
1400 {
1401 	int err = -EOPNOTSUPP;
1402 
1403 	if (cls->common.protocol == htons(ETH_P_ALL) &&
1404 	    flow_offload_has_one_action(&cls->rule->action) &&
1405 	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
1406 		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
1407 	else if (flow_offload_has_one_action(&cls->rule->action) &&
1408 		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
1409 		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);
1410 
1411 	return err;
1412 }
1413 
/* Tear down a previously offloaded matchall rule (mirror or policer),
 * identified by its tc cookie. Silently does nothing if the cookie is
 * unknown.
 */
static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	/* Unlink before undoing the hardware configuration */
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		if (ds->ops->port_mirror_del)
			ds->ops->port_mirror_del(ds, dp->index,
						 &mall_tc_entry->mirror);
		break;
	case DSA_PORT_MALL_POLICER:
		if (ds->ops->port_policer_del)
			ds->ops->port_policer_del(ds, dp->index);
		break;
	default:
		/* Entries are only ever created as MIRROR or POLICER */
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}
1443 
1444 static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
1445 					   struct tc_cls_matchall_offload *cls,
1446 					   bool ingress)
1447 {
1448 	if (cls->common.chain_index)
1449 		return -EOPNOTSUPP;
1450 
1451 	switch (cls->command) {
1452 	case TC_CLSMATCHALL_REPLACE:
1453 		return dsa_slave_add_cls_matchall(dev, cls, ingress);
1454 	case TC_CLSMATCHALL_DESTROY:
1455 		dsa_slave_del_cls_matchall(dev, cls);
1456 		return 0;
1457 	default:
1458 		return -EOPNOTSUPP;
1459 	}
1460 }
1461 
1462 static int dsa_slave_add_cls_flower(struct net_device *dev,
1463 				    struct flow_cls_offload *cls,
1464 				    bool ingress)
1465 {
1466 	struct dsa_port *dp = dsa_slave_to_port(dev);
1467 	struct dsa_switch *ds = dp->ds;
1468 	int port = dp->index;
1469 
1470 	if (!ds->ops->cls_flower_add)
1471 		return -EOPNOTSUPP;
1472 
1473 	return ds->ops->cls_flower_add(ds, port, cls, ingress);
1474 }
1475 
1476 static int dsa_slave_del_cls_flower(struct net_device *dev,
1477 				    struct flow_cls_offload *cls,
1478 				    bool ingress)
1479 {
1480 	struct dsa_port *dp = dsa_slave_to_port(dev);
1481 	struct dsa_switch *ds = dp->ds;
1482 	int port = dp->index;
1483 
1484 	if (!ds->ops->cls_flower_del)
1485 		return -EOPNOTSUPP;
1486 
1487 	return ds->ops->cls_flower_del(ds, port, cls, ingress);
1488 }
1489 
1490 static int dsa_slave_stats_cls_flower(struct net_device *dev,
1491 				      struct flow_cls_offload *cls,
1492 				      bool ingress)
1493 {
1494 	struct dsa_port *dp = dsa_slave_to_port(dev);
1495 	struct dsa_switch *ds = dp->ds;
1496 	int port = dp->index;
1497 
1498 	if (!ds->ops->cls_flower_stats)
1499 		return -EOPNOTSUPP;
1500 
1501 	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
1502 }
1503 
1504 static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
1505 					 struct flow_cls_offload *cls,
1506 					 bool ingress)
1507 {
1508 	switch (cls->command) {
1509 	case FLOW_CLS_REPLACE:
1510 		return dsa_slave_add_cls_flower(dev, cls, ingress);
1511 	case FLOW_CLS_DESTROY:
1512 		return dsa_slave_del_cls_flower(dev, cls, ingress);
1513 	case FLOW_CLS_STATS:
1514 		return dsa_slave_stats_cls_flower(dev, cls, ingress);
1515 	default:
1516 		return -EOPNOTSUPP;
1517 	}
1518 }
1519 
1520 static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1521 				       void *cb_priv, bool ingress)
1522 {
1523 	struct net_device *dev = cb_priv;
1524 
1525 	if (!tc_can_offload(dev))
1526 		return -EOPNOTSUPP;
1527 
1528 	switch (type) {
1529 	case TC_SETUP_CLSMATCHALL:
1530 		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
1531 	case TC_SETUP_CLSFLOWER:
1532 		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
1533 	default:
1534 		return -EOPNOTSUPP;
1535 	}
1536 }
1537 
/* Ingress-direction thunk for dsa_slave_setup_tc_block_cb(). */
static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}
1543 
/* Egress-direction thunk for dsa_slave_setup_tc_block_cb(). */
static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}
1549 
/* Driver-wide list of bound flow block callbacks, used by
 * flow_block_cb_is_busy() to reject duplicate binds (see
 * dsa_slave_setup_tc_block()).
 */
static LIST_HEAD(dsa_slave_block_cb_list);
1551 
/* Bind or unbind a tc flow block on this slave. A distinct callback is
 * registered per direction (ingress vs egress clsact), so the shared
 * handler knows which way the rules apply.
 */
static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_slave_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		/* Refuse to bind the same callback twice */
		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
1591 
1592 static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
1593 				    void *type_data)
1594 {
1595 	struct net_device *master = dsa_port_to_master(dsa_to_port(ds, port));
1596 
1597 	if (!master->netdev_ops->ndo_setup_tc)
1598 		return -EOPNOTSUPP;
1599 
1600 	return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
1601 }
1602 
1603 static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
1604 			      void *type_data)
1605 {
1606 	struct dsa_port *dp = dsa_slave_to_port(dev);
1607 	struct dsa_switch *ds = dp->ds;
1608 
1609 	switch (type) {
1610 	case TC_SETUP_BLOCK:
1611 		return dsa_slave_setup_tc_block(dev, type_data);
1612 	case TC_SETUP_FT:
1613 		return dsa_slave_setup_ft_block(ds, dp->index, type_data);
1614 	default:
1615 		break;
1616 	}
1617 
1618 	if (!ds->ops->port_setup_tc)
1619 		return -EOPNOTSUPP;
1620 
1621 	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
1622 }
1623 
1624 static int dsa_slave_get_rxnfc(struct net_device *dev,
1625 			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
1626 {
1627 	struct dsa_port *dp = dsa_slave_to_port(dev);
1628 	struct dsa_switch *ds = dp->ds;
1629 
1630 	if (!ds->ops->get_rxnfc)
1631 		return -EOPNOTSUPP;
1632 
1633 	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
1634 }
1635 
1636 static int dsa_slave_set_rxnfc(struct net_device *dev,
1637 			       struct ethtool_rxnfc *nfc)
1638 {
1639 	struct dsa_port *dp = dsa_slave_to_port(dev);
1640 	struct dsa_switch *ds = dp->ds;
1641 
1642 	if (!ds->ops->set_rxnfc)
1643 		return -EOPNOTSUPP;
1644 
1645 	return ds->ops->set_rxnfc(ds, dp->index, nfc);
1646 }
1647 
1648 static int dsa_slave_get_ts_info(struct net_device *dev,
1649 				 struct ethtool_ts_info *ts)
1650 {
1651 	struct dsa_slave_priv *p = netdev_priv(dev);
1652 	struct dsa_switch *ds = p->dp->ds;
1653 
1654 	if (!ds->ops->get_ts_info)
1655 		return -EOPNOTSUPP;
1656 
1657 	return ds->ops->get_ts_info(ds, p->dp->index, ts);
1658 }
1659 
/* ndo_vlan_rx_add_vid: install @vid as a tagged, non-PVID VLAN on both the
 * user port and its CPU port. There is no netlink request context here, so
 * any extack message from the lower layers is reported via the kernel log.
 */
static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct netlink_ext_ack extack = {0};
	int ret;

	/* User port... */
	ret = dsa_port_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "%s\n", extack._msg);
		return ret;
	}

	/* And CPU port... */
	ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
				   extack._msg);
		return ret;
	}

	return 0;
}
1692 
1693 static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
1694 				      u16 vid)
1695 {
1696 	struct dsa_port *dp = dsa_slave_to_port(dev);
1697 	struct switchdev_obj_port_vlan vlan = {
1698 		.vid = vid,
1699 		/* This API only allows programming tagged, non-PVID VIDs */
1700 		.flags = 0,
1701 	};
1702 	int err;
1703 
1704 	err = dsa_port_vlan_del(dp, &vlan);
1705 	if (err)
1706 		return err;
1707 
1708 	return dsa_port_host_vlan_del(dp, &vlan);
1709 }
1710 
1711 static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
1712 {
1713 	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
1714 
1715 	return dsa_slave_vlan_rx_add_vid(arg, proto, vid);
1716 }
1717 
1718 static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
1719 {
1720 	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
1721 
1722 	return dsa_slave_vlan_rx_kill_vid(arg, proto, vid);
1723 }
1724 
1725 /* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
1726  * filtering is enabled. The baseline is that only ports that offload a
1727  * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware,
1728  * but there are exceptions for quirky hardware.
1729  *
1730  * If ds->vlan_filtering_is_global = true, then standalone ports which share
1731  * the same switch with other ports that offload a VLAN-aware bridge are also
1732  * inevitably VLAN-aware.
1733  *
1734  * To summarize, a DSA switch port offloads:
1735  *
1736  * - If standalone (this includes software bridge, software LAG):
1737  *     - if ds->needs_standalone_vlan_filtering = true, OR if
1738  *       (ds->vlan_filtering_is_global = true AND there are bridges spanning
1739  *       this switch chip which have vlan_filtering=1)
1740  *         - the 8021q upper VLANs
1741  *     - else (standalone VLAN filtering is not needed, VLAN filtering is not
1742  *       global, or it is, but no port is under a VLAN-aware bridge):
1743  *         - no VLAN (any 8021q upper is a software VLAN)
1744  *
 * - If under a vlan_filtering=0 bridge which it offloads:
1746  *     - if ds->configure_vlan_while_not_filtering = true (default):
1747  *         - the bridge VLANs. These VLANs are committed to hardware but inactive.
1748  *     - else (deprecated):
1749  *         - no VLAN. The bridge VLANs are not restored when VLAN awareness is
1750  *           enabled, so this behavior is broken and discouraged.
1751  *
 * - If under a vlan_filtering=1 bridge which it offloads:
1753  *     - the bridge VLANs
1754  *     - the 8021q upper VLANs
1755  */
/* Toggle hardware VLAN filtering for @slave, replaying or clearing its 8021q
 * upper VLANs in hardware accordingly (see the block comment above for the
 * policy).
 */
int dsa_slave_manage_vlan_filtering(struct net_device *slave,
				    bool vlan_filtering)
{
	int err;

	if (vlan_filtering) {
		/* Advertise the feature first so the rx_add_vid path is
		 * active, then replay all VLANs; undo both on failure.
		 */
		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

		err = vlan_for_each(slave, dsa_slave_restore_vlan, slave);
		if (err) {
			vlan_for_each(slave, dsa_slave_clear_vlan, slave);
			slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
			return err;
		}
	} else {
		/* Remove the VLANs from hardware before dropping the
		 * feature flag.
		 */
		err = vlan_for_each(slave, dsa_slave_clear_vlan, slave);
		if (err)
			return err;

		slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	return 0;
}
1780 
/* One slave netdev whose MTU may be changed during bridge MTU
 * normalization; old_mtu is kept for rollback on failure.
 */
struct dsa_hw_port {
	struct list_head list;
	struct net_device *dev;
	int old_mtu;
};
1786 
/* Apply @mtu to every port on @hw_port_list, skipping ports that already
 * have it. On failure, walk back over the ports changed so far and restore
 * the MTUs saved in ->old_mtu.
 */
static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
{
	const struct dsa_hw_port *p;
	int err;

	list_for_each_entry(p, hw_port_list, list) {
		if (p->dev->mtu == mtu)
			continue;

		err = dev_set_mtu(p->dev, mtu);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	/* Restore in reverse, starting from the port that failed */
	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
		if (p->dev->mtu == p->old_mtu)
			continue;

		if (dev_set_mtu(p->dev, p->old_mtu))
			netdev_err(p->dev, "Failed to restore MTU\n");
	}

	return err;
}
1814 
1815 static void dsa_hw_port_list_free(struct list_head *hw_port_list)
1816 {
1817 	struct dsa_hw_port *p, *n;
1818 
1819 	list_for_each_entry_safe(p, n, hw_port_list, list)
1820 		kfree(p);
1821 }
1822 
/* Make the hardware datapath to/from @dev limited to a common MTU */
static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
	struct list_head hw_port_list;
	struct dsa_switch_tree *dst;
	int min_mtu = ETH_MAX_MTU;
	struct dsa_port *other_dp;
	int err;

	/* Only relevant for switches that cannot handle per-ingress-port
	 * MTUs, and only for ports that actually offload a bridge.
	 */
	if (!dp->ds->mtu_enforcement_ingress)
		return;

	if (!dp->bridge)
		return;

	INIT_LIST_HEAD(&hw_port_list);

	/* Populate the list of ports that are part of the same bridge
	 * as the newly added/modified port
	 */
	list_for_each_entry(dst, &dsa_tree_list, list) {
		list_for_each_entry(other_dp, &dst->ports, list) {
			struct dsa_hw_port *hw_port;
			struct net_device *slave;

			if (other_dp->type != DSA_PORT_TYPE_USER)
				continue;

			if (!dsa_port_bridge_same(dp, other_dp))
				continue;

			if (!other_dp->ds->mtu_enforcement_ingress)
				continue;

			slave = other_dp->slave;

			/* Track the smallest MTU as the fallback target */
			if (min_mtu > slave->mtu)
				min_mtu = slave->mtu;

			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
			if (!hw_port)
				goto out;

			hw_port->dev = slave;
			hw_port->old_mtu = slave->mtu;

			list_add(&hw_port->list, &hw_port_list);
		}
	}

	/* Attempt to configure the entire hardware bridge to the newly added
	 * interface's MTU first, regardless of whether the intention of the
	 * user was to raise or lower it.
	 */
	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
	if (!err)
		goto out;

	/* Clearly that didn't work out so well, so just set the minimum MTU on
	 * all hardware bridge ports now. If this fails too, then all ports will
	 * still have their old MTU rolled back anyway.
	 */
	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);

out:
	dsa_hw_port_list_free(&hw_port_list);
}
1890 
/* ndo_change_mtu: change the MTU of a user port. This must keep three MTUs
 * coherent: the user port's own, the DSA master's (largest user MTU plus
 * tagging protocol overhead) and the CPU port's. Each stage is unwound in
 * reverse order if a later stage fails.
 */
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int largest_mtu = 0;
	int new_master_mtu;
	int old_master_mtu;
	int mtu_limit;
	int cpu_mtu;
	int err;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	/* Find the largest MTU across all user ports of this tree, as if the
	 * requested change had already been applied.
	 */
	dsa_tree_for_each_user_port(other_dp, ds->dst) {
		int slave_mtu;

		/* During probe, this function will be called for each slave
		 * device, while not all of them have been allocated. That's
		 * ok, it doesn't change what the maximum is, so ignore it.
		 */
		if (!other_dp->slave)
			continue;

		/* Pretend that we already applied the setting, which we
		 * actually haven't (still haven't done all integrity checks)
		 */
		if (dp == other_dp)
			slave_mtu = new_mtu;
		else
			slave_mtu = other_dp->slave->mtu;

		if (largest_mtu < slave_mtu)
			largest_mtu = slave_mtu;
	}

	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
	old_master_mtu = master->mtu;
	new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
	if (new_master_mtu > mtu_limit)
		return -ERANGE;

	/* If the master MTU isn't over limit, there's no need to check the CPU
	 * MTU, since that surely isn't either.
	 */
	cpu_mtu = largest_mtu;

	/* Start applying stuff */
	if (new_master_mtu != old_master_mtu) {
		err = dev_set_mtu(master, new_master_mtu);
		if (err < 0)
			goto out_master_failed;

		/* We only need to propagate the MTU of the CPU port to
		 * upstream switches, so emit a notifier which updates them.
		 */
		err = dsa_port_mtu_change(cpu_dp, cpu_mtu);
		if (err)
			goto out_cpu_failed;
	}

	err = ds->ops->port_change_mtu(ds, dp->index, new_mtu);
	if (err)
		goto out_port_failed;

	dev->mtu = new_mtu;

	dsa_bridge_mtu_normalization(dp);

	return 0;

out_port_failed:
	/* Roll the CPU port back to what matched the old master MTU */
	if (new_master_mtu != old_master_mtu)
		dsa_port_mtu_change(cpu_dp, old_master_mtu -
				    dsa_tag_protocol_overhead(cpu_dp->tag_ops));
out_cpu_failed:
	if (new_master_mtu != old_master_mtu)
		dev_set_mtu(master, old_master_mtu);
out_master_failed:
	return err;
}
1975 
/* Set the port's default (untagged traffic) priority from an ETHERTYPE/0
 * DCB APP entry. The APP table is updated first; if the hardware rejects
 * the priority, the APP entry is removed again.
 */
static int __maybe_unused
dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;

	if (!ds->ops->port_set_default_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	/* Use the highest priority among all APP entries for this selector */
	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = __fls(mask);

	err = ds->ops->port_set_default_prio(ds, port, new_prio);
	if (err) {
		dcb_ieee_delapp(dev, app);
		return err;
	}

	return 0;
}
2002 
/* Map a DSCP value to a priority from a DSCP DCB APP entry. The APP table
 * is updated first; if the hardware rejects the mapping, the APP entry is
 * removed again.
 */
static int __maybe_unused
dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;
	u8 dscp = app->protocol;

	if (!ds->ops->port_add_dscp_prio)
		return -EOPNOTSUPP;

	/* DSCP is a 6-bit field */
	if (dscp >= 64) {
		netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
			   dscp);
		return -EINVAL;
	}

	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	/* Use the highest priority among all APP entries for this DSCP */
	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = __fls(mask);

	err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio);
	if (err) {
		dcb_ieee_delapp(dev, app);
		return err;
	}

	return 0;
}
2036 
2037 static int __maybe_unused dsa_slave_dcbnl_ieee_setapp(struct net_device *dev,
2038 						      struct dcb_app *app)
2039 {
2040 	switch (app->selector) {
2041 	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
2042 		switch (app->protocol) {
2043 		case 0:
2044 			return dsa_slave_dcbnl_set_default_prio(dev, app);
2045 		default:
2046 			return -EOPNOTSUPP;
2047 		}
2048 		break;
2049 	case IEEE_8021QAZ_APP_SEL_DSCP:
2050 		return dsa_slave_dcbnl_add_dscp_prio(dev, app);
2051 	default:
2052 		return -EOPNOTSUPP;
2053 	}
2054 }
2055 
/* Delete an ETHERTYPE/0 DCB APP entry and reprogram the port's default
 * priority from whatever entries remain (0 if none). On hardware failure
 * the APP entry is re-added.
 */
static int __maybe_unused
dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;

	if (!ds->ops->port_set_default_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	/* Fall back to priority 0 when no APP entries are left */
	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = mask ? __fls(mask) : 0;

	err = ds->ops->port_set_default_prio(ds, port, new_prio);
	if (err) {
		dcb_ieee_setapp(dev, app);
		return err;
	}

	return 0;
}
2082 
/* Delete a DSCP-to-priority mapping previously installed through a DSCP
 * DCB APP entry. On hardware failure the APP entry is re-added.
 */
static int __maybe_unused
dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err, port = dp->index;
	u8 dscp = app->protocol;

	if (!ds->ops->port_del_dscp_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority);
	if (err) {
		dcb_ieee_setapp(dev, app);
		return err;
	}

	return 0;
}
2106 
2107 static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev,
2108 						      struct dcb_app *app)
2109 {
2110 	switch (app->selector) {
2111 	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
2112 		switch (app->protocol) {
2113 		case 0:
2114 			return dsa_slave_dcbnl_del_default_prio(dev, app);
2115 		default:
2116 			return -EOPNOTSUPP;
2117 		}
2118 		break;
2119 	case IEEE_8021QAZ_APP_SEL_DSCP:
2120 		return dsa_slave_dcbnl_del_dscp_prio(dev, app);
2121 	default:
2122 		return -EOPNOTSUPP;
2123 	}
2124 }
2125 
/* Pre-populate the DCB application priority table with the priorities
 * configured during switch setup, which we read from hardware here.
 */
static int dsa_slave_dcbnl_init(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	/* Seed the ETHERTYPE/0 entry with the port's default priority */
	if (ds->ops->port_get_default_prio) {
		int prio = ds->ops->port_get_default_prio(ds, port);
		struct dcb_app app = {
			.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
			.protocol = 0,
			.priority = prio,
		};

		if (prio < 0)
			return prio;

		err = dcb_ieee_setapp(dev, &app);
		if (err)
			return err;
	}

	/* Seed one DSCP entry per code point the hardware has a mapping for;
	 * -EOPNOTSUPP from the driver means "no mapping", not an error.
	 */
	if (ds->ops->port_get_dscp_prio) {
		int protocol;

		for (protocol = 0; protocol < 64; protocol++) {
			struct dcb_app app = {
				.selector = IEEE_8021QAZ_APP_SEL_DSCP,
				.protocol = protocol,
			};
			int prio;

			prio = ds->ops->port_get_dscp_prio(ds, port, protocol);
			if (prio == -EOPNOTSUPP)
				continue;
			if (prio < 0)
				return prio;

			app.priority = prio;

			err = dcb_ieee_setapp(dev, &app);
			if (err)
				return err;
		}
	}

	return 0;
}
2178 
/* ethtool operations for DSA user ports. Link-related ops delegate to
 * phylink; everything else forwards to the switch driver's dsa_switch_ops.
 */
static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo		= dsa_slave_get_drvinfo,
	.get_regs_len		= dsa_slave_get_regs_len,
	.get_regs		= dsa_slave_get_regs,
	.nway_reset		= dsa_slave_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= dsa_slave_get_eeprom_len,
	.get_eeprom		= dsa_slave_get_eeprom,
	.set_eeprom		= dsa_slave_set_eeprom,
	.get_strings		= dsa_slave_get_strings,
	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
	.get_sset_count		= dsa_slave_get_sset_count,
	.get_eth_phy_stats	= dsa_slave_get_eth_phy_stats,
	.get_eth_mac_stats	= dsa_slave_get_eth_mac_stats,
	.get_eth_ctrl_stats	= dsa_slave_get_eth_ctrl_stats,
	.get_rmon_stats		= dsa_slave_get_rmon_stats,
	.set_wol		= dsa_slave_set_wol,
	.get_wol		= dsa_slave_get_wol,
	.set_eee		= dsa_slave_set_eee,
	.get_eee		= dsa_slave_get_eee,
	.get_link_ksettings	= dsa_slave_get_link_ksettings,
	.set_link_ksettings	= dsa_slave_set_link_ksettings,
	.get_pause_stats	= dsa_slave_get_pause_stats,
	.get_pauseparam		= dsa_slave_get_pauseparam,
	.set_pauseparam		= dsa_slave_set_pauseparam,
	.get_rxnfc		= dsa_slave_get_rxnfc,
	.set_rxnfc		= dsa_slave_set_rxnfc,
	.get_ts_info		= dsa_slave_get_ts_info,
	.self_test		= dsa_slave_net_selftest,
};
2209 
/* DCB netlink operations; only installed when CONFIG_DCB is enabled
 * (see dsa_slave_create()), hence __maybe_unused.
 */
static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = {
	.ieee_setapp		= dsa_slave_dcbnl_ieee_setapp,
	.ieee_delapp		= dsa_slave_dcbnl_ieee_delapp,
};
2214 
2215 static void dsa_slave_get_stats64(struct net_device *dev,
2216 				  struct rtnl_link_stats64 *s)
2217 {
2218 	struct dsa_port *dp = dsa_slave_to_port(dev);
2219 	struct dsa_switch *ds = dp->ds;
2220 
2221 	if (ds->ops->get_stats64)
2222 		ds->ops->get_stats64(ds, dp->index, s);
2223 	else
2224 		dev_get_tstats64(dev, s);
2225 }
2226 
2227 static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
2228 				       struct net_device_path *path)
2229 {
2230 	struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
2231 	struct net_device *master = dsa_port_to_master(dp);
2232 	struct dsa_port *cpu_dp = dp->cpu_dp;
2233 
2234 	path->dev = ctx->dev;
2235 	path->type = DEV_PATH_DSA;
2236 	path->dsa.proto = cpu_dp->tag_ops->proto;
2237 	path->dsa.port = dp->index;
2238 	ctx->dev = master;
2239 
2240 	return 0;
2241 }
2242 
/* net_device operations for DSA user ports. dsa_slave_dev_check()
 * identifies a user port by comparing against this table, so it must
 * remain the single ops instance shared by all slave netdevs.
 */
static const struct net_device_ops dsa_slave_netdev_ops = {
	.ndo_open	 	= dsa_slave_open,
	.ndo_stop		= dsa_slave_close,
	.ndo_start_xmit		= dsa_slave_xmit,
	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
	.ndo_set_mac_address	= dsa_slave_set_mac_address,
	.ndo_fdb_dump		= dsa_slave_fdb_dump,
	.ndo_eth_ioctl		= dsa_slave_ioctl,
	.ndo_get_iflink		= dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
	.ndo_poll_controller	= dsa_slave_poll_controller,
#endif
	.ndo_setup_tc		= dsa_slave_setup_tc,
	.ndo_get_stats64	= dsa_slave_get_stats64,
	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
	.ndo_change_mtu		= dsa_slave_change_mtu,
	.ndo_fill_forward_path	= dsa_slave_fill_forward_path,
};
2265 
/* Device type exposed via sysfs so userspace can recognize DSA ports */
static struct device_type dsa_type = {
	.name	= "dsa",
};
2269 
2270 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
2271 {
2272 	const struct dsa_port *dp = dsa_to_port(ds, port);
2273 
2274 	if (dp->pl)
2275 		phylink_mac_change(dp->pl, up);
2276 }
2277 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
2278 
/* phylink get_fixed_state callback: delegate fixed-link state polling
 * to the switch driver's phylink_fixed_state op. Registered in
 * dsa_slave_phy_setup() only when the driver provides that op.
 */
static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
					  struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	/* No need to check that this operation is valid, the callback would
	 * not be called if it was not.
	 */
	ds->ops->phylink_fixed_state(ds, dp->index, state);
}
2290 
2291 /* slave device setup *******************************************************/
2292 static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr,
2293 				 u32 flags)
2294 {
2295 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2296 	struct dsa_switch *ds = dp->ds;
2297 
2298 	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
2299 	if (!slave_dev->phydev) {
2300 		netdev_err(slave_dev, "no phy at %d\n", addr);
2301 		return -ENODEV;
2302 	}
2303 
2304 	slave_dev->phydev->dev_flags |= flags;
2305 
2306 	return phylink_connect_phy(dp->pl, slave_dev->phydev);
2307 }
2308 
/* Create the phylink instance for a user port and connect it to its
 * PHY. The PHY is resolved from the device tree (or SFP); if that
 * yields -ENODEV and the switch exposes an internal MDIO bus, the PHY
 * at the port's own index on that bus is tried instead. On failure the
 * phylink instance created here is destroyed again before returning.
 */
static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	u32 phy_flags = 0;
	int ret;

	dp->pl_config.dev = &slave_dev->dev;
	dp->pl_config.type = PHYLINK_NETDEV;

	/* The get_fixed_state callback takes precedence over polling the
	 * link GPIO in PHYLINK (see phylink_get_fixed_state).  Only set
	 * this if the switch provides such a callback.
	 */
	if (ds->ops->phylink_fixed_state) {
		dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
		dp->pl_config.poll_fixed_state = true;
	}

	ret = dsa_port_phylink_create(dp);
	if (ret)
		return ret;

	/* Let the driver pass quirk flags down to the PHY */
	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
	if (ret == -ENODEV && ds->slave_mii_bus) {
		/* We could not connect to a designated PHY or SFP, so try to
		 * use the switch internal MDIO bus instead
		 */
		ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
	}
	if (ret) {
		netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
			   ERR_PTR(ret));
		dsa_port_phylink_destroy(dp);
	}

	return ret;
}
2351 
/* Derive the user port's headroom/tailroom requirements and feature
 * flags from the CPU port's tagging protocol and from the DSA master.
 * Called when the slave is created and, via callers elsewhere, when the
 * tagging protocol or master changes.
 */
void dsa_slave_setup_tagger(struct net_device *slave)
{
	struct dsa_port *dp = dsa_slave_to_port(slave);
	struct net_device *master = dsa_port_to_master(dp);
	struct dsa_slave_priv *p = netdev_priv(slave);
	const struct dsa_port *cpu_dp = dp->cpu_dp;
	const struct dsa_switch *ds = dp->ds;

	slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
	slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
	/* Try to save one extra realloc later in the TX path (in the master)
	 * by also inheriting the master's needed headroom and tailroom.
	 * The 8021q driver also does this.
	 */
	slave->needed_headroom += master->needed_headroom;
	slave->needed_tailroom += master->needed_tailroom;

	/* Cache the tagger's xmit hook for the TX hot path */
	p->xmit = cpu_dp->tag_ops->xmit;

	slave->features = master->vlan_features | NETIF_F_HW_TC;
	slave->hw_features |= NETIF_F_HW_TC;
	slave->features |= NETIF_F_LLTX;
	/* SG/fraglist don't mix with taggers that need a tail tag */
	if (slave->needed_tailroom)
		slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
	if (ds->needs_standalone_vlan_filtering)
		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
2379 
2380 int dsa_slave_suspend(struct net_device *slave_dev)
2381 {
2382 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2383 
2384 	if (!netif_running(slave_dev))
2385 		return 0;
2386 
2387 	netif_device_detach(slave_dev);
2388 
2389 	rtnl_lock();
2390 	phylink_stop(dp->pl);
2391 	rtnl_unlock();
2392 
2393 	return 0;
2394 }
2395 
2396 int dsa_slave_resume(struct net_device *slave_dev)
2397 {
2398 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2399 
2400 	if (!netif_running(slave_dev))
2401 		return 0;
2402 
2403 	netif_device_attach(slave_dev);
2404 
2405 	rtnl_lock();
2406 	phylink_start(dp->pl);
2407 	rtnl_unlock();
2408 
2409 	return 0;
2410 }
2411 
/* Create, configure and register the user (slave) net_device for @port.
 *
 * On success, port->slave points at the new netdev, which has been
 * linked as an upper device of the DSA master. On failure all
 * intermediate state (tstats, gro cells, phylink, registration, upper
 * link) is unwound via the goto chain and a negative errno is returned.
 */
int dsa_slave_create(struct dsa_port *port)
{
	struct net_device *master = dsa_port_to_master(port);
	struct dsa_switch *ds = port->ds;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	const char *name;
	int assign_type;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	/* Prefer the DT/platform-provided port name; otherwise fall back
	 * to kernel-enumerated eth%d naming.
	 */
	if (port->name) {
		name = port->name;
		assign_type = NET_NAME_PREDICTABLE;
	} else {
		name = "eth%d";
		assign_type = NET_NAME_ENUM;
	}

	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
				     assign_type, ether_setup,
				     ds->num_tx_queues, 1);
	if (slave_dev == NULL)
		return -ENOMEM;

	slave_dev->rtnl_link_ops = &dsa_link_ops;
	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
#if IS_ENABLED(CONFIG_DCB)
	slave_dev->dcbnl_ops = &dsa_slave_dcbnl_ops;
#endif
	/* Use the port's own MAC when provided, else inherit the master's */
	if (!is_zero_ether_addr(port->mac))
		eth_hw_addr_set(slave_dev, port->mac);
	else
		eth_hw_addr_inherit(slave_dev, master);
	slave_dev->priv_flags |= IFF_NO_QUEUE;
	if (dsa_switch_supports_uc_filtering(ds))
		slave_dev->priv_flags |= IFF_UNICAST_FLT;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
	if (ds->ops->port_max_mtu)
		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

	SET_NETDEV_DEV(slave_dev, port->ds->dev);
	SET_NETDEV_DEVLINK_PORT(slave_dev, &port->devlink_port);
	slave_dev->dev.of_node = port->dn;
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!slave_dev->tstats) {
		free_netdev(slave_dev);
		return -ENOMEM;
	}

	ret = gro_cells_init(&p->gcells, slave_dev);
	if (ret)
		goto out_free;

	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	port->slave = slave_dev;
	dsa_slave_setup_tagger(slave_dev);

	netif_carrier_off(slave_dev);

	ret = dsa_slave_phy_setup(slave_dev);
	if (ret) {
		netdev_err(slave_dev,
			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
			   ret, ds->dst->index, ds->index, port->index);
		goto out_gcells;
	}

	rtnl_lock();

	/* A failure to set the MTU is deliberately non-fatal */
	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
	if (ret && ret != -EOPNOTSUPP)
		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
			 ret, ETH_DATA_LEN, port->index);

	ret = register_netdevice(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		rtnl_unlock();
		goto out_phy;
	}

	if (IS_ENABLED(CONFIG_DCB)) {
		ret = dsa_slave_dcbnl_init(slave_dev);
		if (ret) {
			netdev_err(slave_dev,
				   "failed to initialize DCB: %pe\n",
				   ERR_PTR(ret));
			rtnl_unlock();
			goto out_unregister;
		}
	}

	ret = netdev_upper_dev_link(master, slave_dev, NULL);

	rtnl_unlock();

	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	unregister_netdev(slave_dev);
out_phy:
	rtnl_lock();
	phylink_disconnect_phy(p->dp->pl);
	rtnl_unlock();
	dsa_port_phylink_destroy(p->dp);
out_gcells:
	gro_cells_destroy(&p->gcells);
out_free:
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
	port->slave = NULL;
	return ret;
}
2537 
/* Tear down a user port netdev created by dsa_slave_create(), undoing
 * its steps in reverse order: unlink from the master, unregister,
 * disconnect and destroy phylink, then free gro cells, stats and the
 * netdev itself.
 */
void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct net_device *master = dsa_slave_to_master(slave_dev);
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);

	netif_carrier_off(slave_dev);
	rtnl_lock();
	netdev_upper_dev_unlink(master, slave_dev);
	unregister_netdevice(slave_dev);
	phylink_disconnect_phy(dp->pl);
	rtnl_unlock();

	dsa_port_phylink_destroy(dp);
	gro_cells_destroy(&p->gcells);
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
}
2556 
/* Live-migrate a user port from its current DSA master to @master.
 *
 * Validates that the driver supports the operation, that @master is
 * itself a DSA master, and that @master has no unexpected uppers; then
 * re-links the netdev hierarchy, asks the driver to switch CPU ports,
 * refreshes the MTU and (if inherited) the MAC address. Caller must
 * hold RTNL. On failure, the upper links are restored to @old_master.
 */
int dsa_slave_change_master(struct net_device *dev, struct net_device *master,
			    struct netlink_ext_ack *extack)
{
	struct net_device *old_master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct net_device *upper;
	struct list_head *iter;
	int err;

	if (master == old_master)
		return 0;

	if (!ds->ops->port_change_master) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Driver does not support changing DSA master");
		return -EOPNOTSUPP;
	}

	if (!netdev_uses_dsa(master)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Interface not eligible as DSA master");
		return -EOPNOTSUPP;
	}

	/* Only DSA user ports and bridges are acceptable uppers of the
	 * new master; anything else is refused.
	 */
	netdev_for_each_upper_dev_rcu(master, upper, iter) {
		if (dsa_slave_dev_check(upper))
			continue;
		if (netif_is_bridge_master(upper))
			continue;
		NL_SET_ERR_MSG_MOD(extack, "Cannot join master with unknown uppers");
		return -EOPNOTSUPP;
	}

	/* Since we allow live-changing the DSA master, plus we auto-open the
	 * DSA master when the user port opens => we need to ensure that the
	 * new DSA master is open too.
	 */
	if (dev->flags & IFF_UP) {
		err = dev_open(master, extack);
		if (err)
			return err;
	}

	netdev_upper_dev_unlink(old_master, dev);

	err = netdev_upper_dev_link(master, dev, extack);
	if (err)
		goto out_revert_old_master_unlink;

	err = dsa_port_change_master(dp, master, extack);
	if (err)
		goto out_revert_master_link;

	/* Update the MTU of the new CPU port through cross-chip notifiers */
	err = dsa_slave_change_mtu(dev, dev->mtu);
	if (err && err != -EOPNOTSUPP) {
		netdev_warn(dev,
			    "nonfatal error updating MTU with new master: %pe\n",
			    ERR_PTR(err));
	}

	/* If the port doesn't have its own MAC address and relies on the DSA
	 * master's one, inherit it again from the new DSA master.
	 */
	if (is_zero_ether_addr(dp->mac))
		eth_hw_addr_inherit(dev, master);

	return 0;

out_revert_master_link:
	netdev_upper_dev_unlink(master, dev);
out_revert_old_master_unlink:
	netdev_upper_dev_link(old_master, dev, NULL);
	return err;
}
2633 
2634 bool dsa_slave_dev_check(const struct net_device *dev)
2635 {
2636 	return dev->netdev_ops == &dsa_slave_netdev_ops;
2637 }
2638 EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
2639 
2640 static int dsa_slave_changeupper(struct net_device *dev,
2641 				 struct netdev_notifier_changeupper_info *info)
2642 {
2643 	struct dsa_port *dp = dsa_slave_to_port(dev);
2644 	struct netlink_ext_ack *extack;
2645 	int err = NOTIFY_DONE;
2646 
2647 	if (!dsa_slave_dev_check(dev))
2648 		return err;
2649 
2650 	extack = netdev_notifier_info_to_extack(&info->info);
2651 
2652 	if (netif_is_bridge_master(info->upper_dev)) {
2653 		if (info->linking) {
2654 			err = dsa_port_bridge_join(dp, info->upper_dev, extack);
2655 			if (!err)
2656 				dsa_bridge_mtu_normalization(dp);
2657 			if (err == -EOPNOTSUPP) {
2658 				if (extack && !extack->_msg)
2659 					NL_SET_ERR_MSG_MOD(extack,
2660 							   "Offloading not supported");
2661 				err = 0;
2662 			}
2663 			err = notifier_from_errno(err);
2664 		} else {
2665 			dsa_port_bridge_leave(dp, info->upper_dev);
2666 			err = NOTIFY_OK;
2667 		}
2668 	} else if (netif_is_lag_master(info->upper_dev)) {
2669 		if (info->linking) {
2670 			err = dsa_port_lag_join(dp, info->upper_dev,
2671 						info->upper_info, extack);
2672 			if (err == -EOPNOTSUPP) {
2673 				NL_SET_ERR_MSG_MOD(info->info.extack,
2674 						   "Offloading not supported");
2675 				err = 0;
2676 			}
2677 			err = notifier_from_errno(err);
2678 		} else {
2679 			dsa_port_lag_leave(dp, info->upper_dev);
2680 			err = NOTIFY_OK;
2681 		}
2682 	} else if (is_hsr_master(info->upper_dev)) {
2683 		if (info->linking) {
2684 			err = dsa_port_hsr_join(dp, info->upper_dev);
2685 			if (err == -EOPNOTSUPP) {
2686 				NL_SET_ERR_MSG_MOD(info->info.extack,
2687 						   "Offloading not supported");
2688 				err = 0;
2689 			}
2690 			err = notifier_from_errno(err);
2691 		} else {
2692 			dsa_port_hsr_leave(dp, info->upper_dev);
2693 			err = NOTIFY_OK;
2694 		}
2695 	}
2696 
2697 	return err;
2698 }
2699 
2700 static int dsa_slave_prechangeupper(struct net_device *dev,
2701 				    struct netdev_notifier_changeupper_info *info)
2702 {
2703 	struct dsa_port *dp = dsa_slave_to_port(dev);
2704 
2705 	if (!dsa_slave_dev_check(dev))
2706 		return NOTIFY_DONE;
2707 
2708 	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
2709 		dsa_port_pre_bridge_leave(dp, info->upper_dev);
2710 	else if (netif_is_lag_master(info->upper_dev) && !info->linking)
2711 		dsa_port_pre_lag_leave(dp, info->upper_dev);
2712 	/* dsa_port_pre_hsr_leave is not yet necessary since hsr cannot be
2713 	 * meaningfully enslaved to a bridge yet
2714 	 */
2715 
2716 	return NOTIFY_DONE;
2717 }
2718 
/* Propagate a CHANGEUPPER event seen on a LAG netdev to each DSA user
 * port that offloads it (dp->lag set), so that e.g. a bridge gained or
 * lost by the LAG is also joined/left by the hardware LAG members.
 * Stops at the first port that reports an error.
 */
static int
dsa_slave_lag_changeupper(struct net_device *dev,
			  struct netdev_notifier_changeupper_info *info)
{
	struct net_device *lower;
	struct list_head *iter;
	int err = NOTIFY_DONE;
	struct dsa_port *dp;

	if (!netif_is_lag_master(dev))
		return err;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!dsa_slave_dev_check(lower))
			continue;

		dp = dsa_slave_to_port(lower);
		if (!dp->lag)
			/* Software LAG */
			continue;

		err = dsa_slave_changeupper(lower, info);
		if (notifier_to_errno(err))
			break;
	}

	return err;
}
2747 
/* Same as dsa_slave_lag_changeupper() except that it calls
 * dsa_slave_prechangeupper(): propagate a PRECHANGEUPPER event on a LAG
 * netdev to each DSA user port that offloads it.
 */
static int
dsa_slave_lag_prechangeupper(struct net_device *dev,
			     struct netdev_notifier_changeupper_info *info)
{
	struct net_device *lower;
	struct list_head *iter;
	int err = NOTIFY_DONE;
	struct dsa_port *dp;

	if (!netif_is_lag_master(dev))
		return err;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!dsa_slave_dev_check(lower))
			continue;

		dp = dsa_slave_to_port(lower);
		if (!dp->lag)
			/* Software LAG */
			continue;

		err = dsa_slave_prechangeupper(lower, info);
		if (notifier_to_errno(err))
			break;
	}

	return err;
}
2779 
2780 static int
2781 dsa_prevent_bridging_8021q_upper(struct net_device *dev,
2782 				 struct netdev_notifier_changeupper_info *info)
2783 {
2784 	struct netlink_ext_ack *ext_ack;
2785 	struct net_device *slave, *br;
2786 	struct dsa_port *dp;
2787 
2788 	ext_ack = netdev_notifier_info_to_extack(&info->info);
2789 
2790 	if (!is_vlan_dev(dev))
2791 		return NOTIFY_DONE;
2792 
2793 	slave = vlan_dev_real_dev(dev);
2794 	if (!dsa_slave_dev_check(slave))
2795 		return NOTIFY_DONE;
2796 
2797 	dp = dsa_slave_to_port(slave);
2798 	br = dsa_port_bridge_dev_get(dp);
2799 	if (!br)
2800 		return NOTIFY_DONE;
2801 
2802 	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
2803 	if (br_vlan_enabled(br) &&
2804 	    netif_is_bridge_master(info->upper_dev) && info->linking) {
2805 		NL_SET_ERR_MSG_MOD(ext_ack,
2806 				   "Cannot enslave VLAN device into VLAN aware bridge");
2807 		return notifier_from_errno(-EINVAL);
2808 	}
2809 
2810 	return NOTIFY_DONE;
2811 }
2812 
2813 static int
2814 dsa_slave_check_8021q_upper(struct net_device *dev,
2815 			    struct netdev_notifier_changeupper_info *info)
2816 {
2817 	struct dsa_port *dp = dsa_slave_to_port(dev);
2818 	struct net_device *br = dsa_port_bridge_dev_get(dp);
2819 	struct bridge_vlan_info br_info;
2820 	struct netlink_ext_ack *extack;
2821 	int err = NOTIFY_DONE;
2822 	u16 vid;
2823 
2824 	if (!br || !br_vlan_enabled(br))
2825 		return NOTIFY_DONE;
2826 
2827 	extack = netdev_notifier_info_to_extack(&info->info);
2828 	vid = vlan_dev_vlan_id(info->upper_dev);
2829 
2830 	/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
2831 	 * device, respectively the VID is not found, returning
2832 	 * 0 means success, which is a failure for us here.
2833 	 */
2834 	err = br_vlan_get_info(br, vid, &br_info);
2835 	if (err == 0) {
2836 		NL_SET_ERR_MSG_MOD(extack,
2837 				   "This VLAN is already configured by the bridge");
2838 		return notifier_from_errno(-EBUSY);
2839 	}
2840 
2841 	return NOTIFY_DONE;
2842 }
2843 
2844 static int
2845 dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
2846 				      struct netdev_notifier_changeupper_info *info)
2847 {
2848 	struct dsa_switch *ds;
2849 	struct dsa_port *dp;
2850 	int err;
2851 
2852 	if (!dsa_slave_dev_check(dev))
2853 		return dsa_prevent_bridging_8021q_upper(dev, info);
2854 
2855 	dp = dsa_slave_to_port(dev);
2856 	ds = dp->ds;
2857 
2858 	if (ds->ops->port_prechangeupper) {
2859 		err = ds->ops->port_prechangeupper(ds, dp->index, info);
2860 		if (err)
2861 			return notifier_from_errno(err);
2862 	}
2863 
2864 	if (is_vlan_dev(info->upper_dev))
2865 		return dsa_slave_check_8021q_upper(dev, info);
2866 
2867 	return NOTIFY_DONE;
2868 }
2869 
/* To be eligible as a DSA master, a LAG must have all lower interfaces be
 * eligible DSA masters. Additionally, all LAG slaves must be DSA masters of
 * switches in the same switch tree.
 *
 * The pairwise (quadratic) walk is fine: LAGs have a small, bounded
 * number of lower devices.
 */
static int dsa_lag_master_validate(struct net_device *lag_dev,
				   struct netlink_ext_ack *extack)
{
	struct net_device *lower1, *lower2;
	struct list_head *iter1, *iter2;

	netdev_for_each_lower_dev(lag_dev, lower1, iter1) {
		netdev_for_each_lower_dev(lag_dev, lower2, iter2) {
			if (!netdev_uses_dsa(lower1) ||
			    !netdev_uses_dsa(lower2)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "All LAG ports must be eligible as DSA masters");
				return notifier_from_errno(-EINVAL);
			}

			if (lower1 == lower2)
				continue;

			if (!dsa_port_tree_same(lower1->dsa_ptr,
						lower2->dsa_ptr)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "LAG contains DSA masters of disjoint switch trees");
				return notifier_from_errno(-EINVAL);
			}
		}
	}

	return NOTIFY_DONE;
}
2903 
/* PRECHANGEUPPER sanity checks for DSA masters: only DSA user ports,
 * bridges and (validated) LAGs are acceptable uppers; everything else
 * is rejected with -EBUSY.
 */
static int
dsa_master_prechangeupper_sanity_check(struct net_device *master,
				       struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);

	if (!netdev_uses_dsa(master))
		return NOTIFY_DONE;

	if (!info->linking)
		return NOTIFY_DONE;

	/* Allow DSA switch uppers */
	if (dsa_slave_dev_check(info->upper_dev))
		return NOTIFY_DONE;

	/* Allow bridge uppers of DSA masters, subject to further
	 * restrictions in dsa_bridge_prechangelower_sanity_check()
	 */
	if (netif_is_bridge_master(info->upper_dev))
		return NOTIFY_DONE;

	/* Allow LAG uppers, subject to further restrictions in
	 * dsa_lag_master_prechangelower_sanity_check()
	 */
	if (netif_is_lag_master(info->upper_dev))
		return dsa_lag_master_validate(info->upper_dev, extack);

	NL_SET_ERR_MSG_MOD(extack,
			   "DSA master cannot join unknown upper interfaces");
	return notifier_from_errno(-EBUSY);
}
2936 
/* PRECHANGEUPPER sanity check run when @dev is about to become a lower
 * of a LAG that already acts as DSA master: @dev must itself be a DSA
 * master, and of the same switch tree as the LAG's existing lowers.
 */
static int
dsa_lag_master_prechangelower_sanity_check(struct net_device *dev,
					   struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
	struct net_device *lag_dev = info->upper_dev;
	struct net_device *lower;
	struct list_head *iter;

	if (!netdev_uses_dsa(lag_dev) || !netif_is_lag_master(lag_dev))
		return NOTIFY_DONE;

	if (!info->linking)
		return NOTIFY_DONE;

	if (!netdev_uses_dsa(dev)) {
		NL_SET_ERR_MSG(extack,
			       "Only DSA masters can join a LAG DSA master");
		return notifier_from_errno(-EINVAL);
	}

	/* Only the first lower is compared (note the unconditional break)
	 * - presumably sufficient because existing lowers were already
	 * validated to share one tree by dsa_lag_master_validate();
	 * confirm before relying on this elsewhere.
	 */
	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) {
			NL_SET_ERR_MSG(extack,
				       "Interface is DSA master for a different switch tree than this LAG");
			return notifier_from_errno(-EINVAL);
		}

		break;
	}

	return NOTIFY_DONE;
}
2970 
/* Don't allow bridging of DSA masters, since the bridge layer rx_handler
 * prevents the DSA fake ethertype handler to be invoked, so we don't get the
 * chance to strip off and parse the DSA switch tag protocol header (the bridge
 * layer just returns RX_HANDLER_CONSUMED, stopping RX processing for these
 * frames).
 * The only case where that would not be an issue is when bridging can already
 * be offloaded, such as when the DSA master is itself a DSA or plain switchdev
 * port, and is bridged only with other ports from the same hardware device.
 */
static int
dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
				       struct netdev_notifier_changeupper_info *info)
{
	struct net_device *br = info->upper_dev;
	struct netlink_ext_ack *extack;
	struct net_device *lower;
	struct list_head *iter;

	if (!netif_is_bridge_master(br))
		return NOTIFY_DONE;

	if (!info->linking)
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* Reject the link unless every (new_lower, existing lower) pair
	 * that involves a DSA master shares the same parent hardware.
	 */
	netdev_for_each_lower_dev(br, lower, iter) {
		if (!netdev_uses_dsa(new_lower) && !netdev_uses_dsa(lower))
			continue;

		if (!netdev_port_same_parent_id(lower, new_lower)) {
			NL_SET_ERR_MSG(extack,
				       "Cannot do software bridging with a DSA master");
			return notifier_from_errno(-EINVAL);
		}
	}

	return NOTIFY_DONE;
}
3010 
/* A LAG DSA master has lost its last physical lower: move every user
 * port still pointing at @lag_dev back to the tree's first physical
 * CPU port master. Failures are logged but not propagated (best
 * effort, there is no better fallback at this point).
 */
static void dsa_tree_migrate_ports_from_lag_master(struct dsa_switch_tree *dst,
						   struct net_device *lag_dev)
{
	struct net_device *new_master = dsa_tree_find_first_master(dst);
	struct dsa_port *dp;
	int err;

	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_master(dp) != lag_dev)
			continue;

		err = dsa_slave_change_master(dp->slave, new_master, NULL);
		if (err) {
			netdev_err(dp->slave,
				   "failed to restore master to %s: %pe\n",
				   new_master->name, ERR_PTR(err));
		}
	}
}
3030 
/* DSA master @master is joining @lag_dev: first enroll the CPU port in
 * the (hardware) LAG, then migrate every user port currently served by
 * @master to use @lag_dev as its master. On failure, ports migrated so
 * far are moved back (best effort) and the CPU port leaves the LAG.
 */
static int dsa_master_lag_join(struct net_device *master,
			       struct net_device *lag_dev,
			       struct netdev_lag_upper_info *uinfo,
			       struct netlink_ext_ack *extack)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct dsa_port *dp;
	int err;

	err = dsa_master_lag_setup(lag_dev, cpu_dp, uinfo, extack);
	if (err)
		return err;

	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_master(dp) != master)
			continue;

		err = dsa_slave_change_master(dp->slave, lag_dev, extack);
		if (err)
			goto restore;
	}

	return 0;

restore:
	/* Walk back over the ports already migrated and undo the change */
	dsa_tree_for_each_user_port_continue_reverse(dp, dst) {
		if (dsa_port_to_master(dp) != lag_dev)
			continue;

		err = dsa_slave_change_master(dp->slave, master, NULL);
		if (err) {
			netdev_err(dp->slave,
				   "failed to restore master to %s: %pe\n",
				   master->name, ERR_PTR(err));
		}
	}

	dsa_master_lag_teardown(lag_dev, master->dsa_ptr);

	return err;
}
3073 
/* DSA master @master is leaving @lag_dev. If another physical DSA
 * master remains under the LAG, repoint the LAG's dsa_ptr and the user
 * ports' cpu_dp to it; otherwise migrate all user ports back to the
 * tree's first physical CPU port. Finally take the CPU port out of the
 * hardware LAG.
 */
static void dsa_master_lag_leave(struct net_device *master,
				 struct net_device *lag_dev)
{
	struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct dsa_port *new_cpu_dp = NULL;
	struct net_device *lower;
	struct list_head *iter;

	/* Pick any remaining physical DSA master under the LAG */
	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (netdev_uses_dsa(lower)) {
			new_cpu_dp = lower->dsa_ptr;
			break;
		}
	}

	if (new_cpu_dp) {
		/* Update the CPU port of the user ports still under the LAG
		 * so that dsa_port_to_master() continues to work properly
		 */
		dsa_tree_for_each_user_port(dp, dst)
			if (dsa_port_to_master(dp) == lag_dev)
				dp->cpu_dp = new_cpu_dp;

		/* Update the index of the virtual CPU port to match the lowest
		 * physical CPU port
		 */
		lag_dev->dsa_ptr = new_cpu_dp;
		/* NOTE(review): write barrier orders the dsa_ptr update
		 * before later stores; the pairing read side is not
		 * visible in this file -- presumably the RX hot path.
		 */
		wmb();
	} else {
		/* If the LAG DSA master has no ports left, migrate back all
		 * user ports to the first physical CPU port
		 */
		dsa_tree_migrate_ports_from_lag_master(dst, lag_dev);
	}

	/* This DSA master has left its LAG in any case, so let
	 * the CPU port leave the hardware LAG as well
	 */
	dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
}
3115 
3116 static int dsa_master_changeupper(struct net_device *dev,
3117 				  struct netdev_notifier_changeupper_info *info)
3118 {
3119 	struct netlink_ext_ack *extack;
3120 	int err = NOTIFY_DONE;
3121 
3122 	if (!netdev_uses_dsa(dev))
3123 		return err;
3124 
3125 	extack = netdev_notifier_info_to_extack(&info->info);
3126 
3127 	if (netif_is_lag_master(info->upper_dev)) {
3128 		if (info->linking) {
3129 			err = dsa_master_lag_join(dev, info->upper_dev,
3130 						  info->upper_info, extack);
3131 			err = notifier_from_errno(err);
3132 		} else {
3133 			dsa_master_lag_leave(dev, info->upper_dev);
3134 			err = NOTIFY_OK;
3135 		}
3136 	}
3137 
3138 	return err;
3139 }
3140 
/* Netdevice notifier handler for DSA: reacts to topology changes involving
 * DSA user ports and DSA masters (bridge/LAG upper linking, LAG lower state
 * changes) and tracks the administrative/operational state of master ports.
 * Returns a NOTIFY_* code; errors are wrapped with notifier_from_errno().
 */
static int dsa_slave_netdevice_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_PRECHANGEUPPER: {
		struct netdev_notifier_changeupper_info *info = ptr;
		int err;

		/* Veto invalid topologies before the upper device is
		 * actually linked. Each check may return a notifier error;
		 * the first failure aborts the CHANGEUPPER transaction.
		 */
		err = dsa_slave_prechangeupper_sanity_check(dev, info);
		if (notifier_to_errno(err))
			return err;

		err = dsa_master_prechangeupper_sanity_check(dev, info);
		if (notifier_to_errno(err))
			return err;

		err = dsa_lag_master_prechangelower_sanity_check(dev, info);
		if (notifier_to_errno(err))
			return err;

		err = dsa_bridge_prechangelower_sanity_check(dev, info);
		if (notifier_to_errno(err))
			return err;

		err = dsa_slave_prechangeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		err = dsa_slave_lag_prechangeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		break;
	}
	case NETDEV_CHANGEUPPER: {
		int err;

		/* Commit phase: the upper has been (un)linked; program the
		 * hardware accordingly for user ports, LAGs and masters.
		 */
		err = dsa_slave_changeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		err = dsa_slave_lag_changeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		err = dsa_master_changeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		break;
	}
	case NETDEV_CHANGELOWERSTATE: {
		struct netdev_notifier_changelowerstate_info *info = ptr;
		struct dsa_port *dp;
		int err = 0;

		/* Propagate LAG lower state (e.g. tx_enabled) for DSA user
		 * ports that are LAG members.
		 */
		if (dsa_slave_dev_check(dev)) {
			dp = dsa_slave_to_port(dev);

			err = dsa_port_lag_change(dp, info->lower_state_info);
		}

		/* Mirror LAG port events on DSA masters that are in
		 * a LAG towards their respective switch CPU ports
		 */
		if (netdev_uses_dsa(dev)) {
			dp = dev->dsa_ptr;

			err = dsa_port_lag_change(dp, info->lower_state_info);
		}

		return notifier_from_errno(err);
	}
	case NETDEV_CHANGE:
	case NETDEV_UP: {
		/* Track state of master port.
		 * DSA driver may require the master port (and indirectly
		 * the tagger) to be available for some special operation.
		 */
		if (netdev_uses_dsa(dev)) {
			struct dsa_port *cpu_dp = dev->dsa_ptr;
			struct dsa_switch_tree *dst = cpu_dp->ds->dst;

			/* Track when the master port is UP */
			dsa_tree_master_oper_state_change(dst, dev,
							  netif_oper_up(dev));

			/* Track when the master port is ready and can accept
			 * packet.
			 * NETDEV_UP event is not enough to flag a port as ready.
			 * We also have to wait for linkwatch_do_dev to dev_activate
			 * and emit a NETDEV_CHANGE event.
			 * We check if a master port is ready by checking if the dev
			 * have a qdisc assigned and is not noop.
			 */
			dsa_tree_master_admin_state_change(dst, dev,
							   !qdisc_tx_is_noop(dev));

			return NOTIFY_OK;
		}

		return NOTIFY_DONE;
	}
	case NETDEV_GOING_DOWN: {
		struct dsa_port *dp, *cpu_dp;
		struct dsa_switch_tree *dst;
		LIST_HEAD(close_list);

		/* When a master goes down, close every user port that is
		 * served by that master's CPU port, since they cannot pass
		 * traffic without it.
		 */
		if (!netdev_uses_dsa(dev))
			return NOTIFY_DONE;

		cpu_dp = dev->dsa_ptr;
		dst = cpu_dp->ds->dst;

		dsa_tree_master_admin_state_change(dst, dev, false);

		list_for_each_entry(dp, &dst->ports, list) {
			if (!dsa_port_is_user(dp))
				continue;

			if (dp->cpu_dp != cpu_dp)
				continue;

			list_add(&dp->slave->close_list, &close_list);
		}

		dev_close_many(&close_list, true);

		return NOTIFY_OK;
	}
	default:
		break;
	}

	return NOTIFY_DONE;
}
3279 
3280 static void
3281 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
3282 {
3283 	struct switchdev_notifier_fdb_info info = {};
3284 
3285 	info.addr = switchdev_work->addr;
3286 	info.vid = switchdev_work->vid;
3287 	info.offloaded = true;
3288 	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
3289 				 switchdev_work->orig_dev, &info.info, NULL);
3290 }
3291 
/* Deferred work scheduled by dsa_slave_fdb_event(): performs the actual
 * (sleepable) hardware FDB add/delete for a user port, choosing between
 * host, LAG and regular port FDB operations. On a successful add, tells
 * the bridge the entry is offloaded. Frees the work item when done.
 */
static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	const unsigned char *addr = switchdev_work->addr;
	struct net_device *dev = switchdev_work->dev;
	u16 vid = switchdev_work->vid;
	struct dsa_switch *ds;
	struct dsa_port *dp;
	int err;

	dp = dsa_slave_to_port(dev);
	ds = dp->ds;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		/* Host addresses go to the CPU port; LAG member ports use
		 * the LAG-wide FDB ops; everything else is a plain port FDB.
		 */
		if (switchdev_work->host_addr)
			err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
		else if (dp->lag)
			err = dsa_port_lag_fdb_add(dp, addr, vid);
		else
			err = dsa_port_fdb_add(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		/* Only report offload success after the hardware accepted it */
		dsa_fdb_offload_notify(switchdev_work);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (switchdev_work->host_addr)
			err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
		else if (dp->lag)
			err = dsa_port_lag_fdb_del(dp, addr, vid);
		else
			err = dsa_port_fdb_del(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	}

	/* Work item was allocated by dsa_slave_fdb_event(); we own it here */
	kfree(switchdev_work);
}
3341 
3342 static bool dsa_foreign_dev_check(const struct net_device *dev,
3343 				  const struct net_device *foreign_dev)
3344 {
3345 	const struct dsa_port *dp = dsa_slave_to_port(dev);
3346 	struct dsa_switch_tree *dst = dp->ds->dst;
3347 
3348 	if (netif_is_bridge_master(foreign_dev))
3349 		return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);
3350 
3351 	if (netif_is_bridge_port(foreign_dev))
3352 		return !dsa_tree_offloads_bridge_port(dst, foreign_dev);
3353 
3354 	/* Everything else is foreign */
3355 	return true;
3356 }
3357 
/* Filter an FDB add/del switchdev notification and, when relevant, queue
 * deferred work to program the hardware. Returns 0 when the event is
 * ignored or the work was queued, or a negative errno.
 * Runs in atomic context (note GFP_ATOMIC), hence the deferred work.
 */
static int dsa_slave_fdb_event(struct net_device *dev,
			       struct net_device *orig_dev,
			       unsigned long event, const void *ctx,
			       const struct switchdev_notifier_fdb_info *fdb_info)
{
	struct dsa_switchdev_event_work *switchdev_work;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	bool host_addr = fdb_info->is_local;
	struct dsa_switch *ds = dp->ds;

	/* When replaying with a specific context, only act on our own port */
	if (ctx && ctx != dp)
		return 0;

	/* Standalone ports do not offload bridge FDB entries */
	if (!dp->bridge)
		return 0;

	if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
		/* Entries learned on a port we offload are already in
		 * hardware; nothing to mirror.
		 */
		if (dsa_port_offloads_bridge_port(dp, orig_dev))
			return 0;

		/* FDB entries learned by the software bridge or by foreign
		 * bridge ports should be installed as host addresses only if
		 * the driver requests assisted learning.
		 */
		if (!ds->assisted_learning_on_cpu_port)
			return 0;
	}

	/* Also treat FDB entries on foreign interfaces bridged with us as host
	 * addresses.
	 */
	if (dsa_foreign_dev_check(dev, orig_dev))
		host_addr = true;

	/* Check early that we're not doing work in vain.
	 * Host addresses on LAG ports still require regular FDB ops,
	 * since the CPU port isn't in a LAG.
	 */
	if (dp->lag && !host_addr) {
		if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
			return -EOPNOTSUPP;
	} else {
		if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
			return -EOPNOTSUPP;
	}

	/* Atomic context: cannot sleep here, so defer the hardware access */
	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return -ENOMEM;

	netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n",
		   event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting",
		   orig_dev->name, fdb_info->addr, fdb_info->vid,
		   host_addr ? " as host address" : "");

	INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work);
	switchdev_work->event = event;
	switchdev_work->dev = dev;
	switchdev_work->orig_dev = orig_dev;

	ether_addr_copy(switchdev_work->addr, fdb_info->addr);
	switchdev_work->vid = fdb_info->vid;
	switchdev_work->host_addr = host_addr;

	/* Ownership of switchdev_work passes to the work handler, which
	 * frees it.
	 */
	dsa_schedule_work(&switchdev_work->work);

	return 0;
}
3426 
3427 /* Called under rcu_read_lock() */
3428 static int dsa_slave_switchdev_event(struct notifier_block *unused,
3429 				     unsigned long event, void *ptr)
3430 {
3431 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3432 	int err;
3433 
3434 	switch (event) {
3435 	case SWITCHDEV_PORT_ATTR_SET:
3436 		err = switchdev_handle_port_attr_set(dev, ptr,
3437 						     dsa_slave_dev_check,
3438 						     dsa_slave_port_attr_set);
3439 		return notifier_from_errno(err);
3440 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
3441 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
3442 		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
3443 							   dsa_slave_dev_check,
3444 							   dsa_foreign_dev_check,
3445 							   dsa_slave_fdb_event);
3446 		return notifier_from_errno(err);
3447 	default:
3448 		return NOTIFY_DONE;
3449 	}
3450 
3451 	return NOTIFY_OK;
3452 }
3453 
3454 static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
3455 					      unsigned long event, void *ptr)
3456 {
3457 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3458 	int err;
3459 
3460 	switch (event) {
3461 	case SWITCHDEV_PORT_OBJ_ADD:
3462 		err = switchdev_handle_port_obj_add_foreign(dev, ptr,
3463 							    dsa_slave_dev_check,
3464 							    dsa_foreign_dev_check,
3465 							    dsa_slave_port_obj_add);
3466 		return notifier_from_errno(err);
3467 	case SWITCHDEV_PORT_OBJ_DEL:
3468 		err = switchdev_handle_port_obj_del_foreign(dev, ptr,
3469 							    dsa_slave_dev_check,
3470 							    dsa_foreign_dev_check,
3471 							    dsa_slave_port_obj_del);
3472 		return notifier_from_errno(err);
3473 	case SWITCHDEV_PORT_ATTR_SET:
3474 		err = switchdev_handle_port_attr_set(dev, ptr,
3475 						     dsa_slave_dev_check,
3476 						     dsa_slave_port_attr_set);
3477 		return notifier_from_errno(err);
3478 	}
3479 
3480 	return NOTIFY_DONE;
3481 }
3482 
/* Netdevice event notifier (local to this file) */
static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call  = dsa_slave_netdevice_event,
};

/* Atomic switchdev notifier; shared with the rest of the DSA core */
struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call = dsa_slave_switchdev_event,
};

/* Blocking (sleepable) switchdev notifier; shared with the DSA core */
struct notifier_block dsa_slave_switchdev_blocking_notifier = {
	.notifier_call = dsa_slave_switchdev_blocking_event,
};
3494 
3495 int dsa_slave_register_notifier(void)
3496 {
3497 	struct notifier_block *nb;
3498 	int err;
3499 
3500 	err = register_netdevice_notifier(&dsa_slave_nb);
3501 	if (err)
3502 		return err;
3503 
3504 	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
3505 	if (err)
3506 		goto err_switchdev_nb;
3507 
3508 	nb = &dsa_slave_switchdev_blocking_notifier;
3509 	err = register_switchdev_blocking_notifier(nb);
3510 	if (err)
3511 		goto err_switchdev_blocking_nb;
3512 
3513 	return 0;
3514 
3515 err_switchdev_blocking_nb:
3516 	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
3517 err_switchdev_nb:
3518 	unregister_netdevice_notifier(&dsa_slave_nb);
3519 	return err;
3520 }
3521 
3522 void dsa_slave_unregister_notifier(void)
3523 {
3524 	struct notifier_block *nb;
3525 	int err;
3526 
3527 	nb = &dsa_slave_switchdev_blocking_notifier;
3528 	err = unregister_switchdev_blocking_notifier(nb);
3529 	if (err)
3530 		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
3531 
3532 	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
3533 	if (err)
3534 		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
3535 
3536 	err = unregister_netdevice_notifier(&dsa_slave_nb);
3537 	if (err)
3538 		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
3539 }
3540